diff --git a/TESTING.asciidoc b/TESTING.asciidoc index a3f92d5db1232..cf4a40713114c 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -42,7 +42,7 @@ In order to start with a different distribution use the `-Drun.distribution` arg To for example start the open source distribution: ------------------------------------- -./gradlew run -Drun.distribution=oss-zip +./gradlew run -Drun.distribution=oss ------------------------------------- ==== License type @@ -631,3 +631,13 @@ inside `/etc/hosts`, e.g.: 255.255.255.255 broadcasthost ::1 localhost ElasticMBP.local` .... + +== Benchmarking + +For changes that might affect the performance characteristics of Elasticsearch +you should also run macrobenchmarks. We maintain a macrobenchmarking tool +called https://github.com/elastic/rally[Rally] +which you can use to measure the performance impact. It comes with a set of +default benchmarks that we also +https://elasticsearch-benchmarks.elastic.co/[run every night]. To get started, +please see https://esrally.readthedocs.io/en/stable/[Rally's documentation]. diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/time/DateFormatterBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/time/DateFormatterBenchmark.java index b30b3ada0ab64..a364a331400a5 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/time/DateFormatterBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/time/DateFormatterBenchmark.java @@ -55,4 +55,3 @@ public TemporalAccessor parseJodaDate() { return jodaFormatter.parse("1234567890"); } } - diff --git a/build.gradle b/build.gradle index 439bd32727d18..dade656f78b82 100644 --- a/build.gradle +++ b/build.gradle @@ -221,14 +221,14 @@ allprojects { "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${version}": ':modules:lang-painless:spi', "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:archives:integ-test-zip', - "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:archives:zip', - "org.elasticsearch.distribution.zip:elasticsearch-oss:${version}": ':distribution:archives:oss-zip', - "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:archives:tar', - "org.elasticsearch.distribution.tar:elasticsearch-oss:${version}": ':distribution:archives:oss-tar', - "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:packages:rpm', - "org.elasticsearch.distribution.rpm:elasticsearch-oss:${version}": ':distribution:packages:oss-rpm', - "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:packages:deb', - "org.elasticsearch.distribution.deb:elasticsearch-oss:${version}": ':distribution:packages:oss-deb', + "downloads.zip:elasticsearch:${version}": ':distribution:archives:zip', + "downloads.zip:elasticsearch-oss:${version}": ':distribution:archives:oss-zip', + "downloads.tar:elasticsearch:${version}": ':distribution:archives:tar', + "downloads.tar:elasticsearch-oss:${version}": ':distribution:archives:oss-tar', + "downloads.rpm:elasticsearch:${version}": ':distribution:packages:rpm', + "downloads.rpm:elasticsearch-oss:${version}": ':distribution:packages:oss-rpm', + "downloads.deb:elasticsearch:${version}": ':distribution:packages:deb', + "downloads.deb:elasticsearch-oss:${version}": ':distribution:packages:oss-deb', "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', "org.elasticsearch.xpack.test:feature-aware:${version}": ':x-pack:test:feature-aware', // for transport client @@ -244,13 +244,13 @@ allprojects { bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unreleasedVersion -> Version unreleased = unreleasedVersion.version String snapshotProject = ":distribution:bwc:${unreleasedVersion.gradleProjectName}" - ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${unreleased}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${unreleased}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.deb:elasticsearch:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.rpm:elasticsearch:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.zip:elasticsearch:${unreleased}"] = snapshotProject if (unreleased.onOrAfter('6.3.0')) { - ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch-oss:${unreleased}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch-oss:${unreleased}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch-oss:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.deb:elasticsearch-oss:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.rpm:elasticsearch-oss:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.zip:elasticsearch-oss:${unreleased}"] = snapshotProject } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index b9ff1fd485e05..b98cdd8788bd4 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -178,6 +178,21 @@ if (project != rootProject) { jarHell.enabled = false thirdPartyAudit.enabled = false + configurations { + distribution + } + + dependencies { + distribution project(':distribution:archives:zip') + distribution project(':distribution:archives:oss-zip') + } + + String localDownloads = "${rootProject.buildDir}/local-downloads" + task setupLocalDownloads(type:Copy) { + from configurations.distribution + into localDownloads + } + unitTest { // The test task is configured to runtimeJava version, but build-tools doesn't support all of them, so test // with compiler instead on the ones that are too old. @@ -192,6 +207,7 @@ if (project != rootProject) { dependsOn project.rootProject.allprojects.collect { it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'} } + dependsOn setupLocalDownloads exclude "**/*Tests.class" testClassesDirs = sourceSets.test.output.classesDirs classpath = sourceSets.test.runtimeClasspath @@ -204,6 +220,7 @@ if (project != rootProject) { ).asPath, ) systemProperty 'test.local-test-repo-path', "${rootProject.buildDir}/local-test-repo" + systemProperty 'test.local-test-downloads-path', localDownloads systemProperty 'test.version_under_test', version systemProperty 'test.lucene-snapshot-revision', (versions.lucene =~ /\w+-snapshot-([a-z0-9]+)/)[0][1] } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index c2741ed5819f4..54a14138505cd 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -392,7 +392,7 @@ class BuildPlugin implements Plugin { static void requireJavaHome(Task task, int version) { Project rootProject = task.project.rootProject // use root project for global accounting if (rootProject.hasProperty('requiredJavaVersions') == false) { - rootProject.rootProject.ext.requiredJavaVersions = [:].withDefault{key -> return []} + rootProject.rootProject.ext.requiredJavaVersions = [:] rootProject.gradle.taskGraph.whenReady { TaskExecutionGraph taskGraph -> List messages = [] for (entry in rootProject.requiredJavaVersions) { @@ -407,9 +407,16 @@ class BuildPlugin implements Plugin { if (messages.isEmpty() == false) { throw new GradleException(messages.join('\n')) } + rootProject.rootProject.ext.requiredJavaVersions = null // reset to null to indicate the pre-execution checks have executed } + } else if (rootProject.rootProject.requiredJavaVersions == null) { + // check directly if the version is present since we are already executing + if (rootProject.javaVersions.get(version) == null) { + throw new GradleException("JAVA${version}_HOME required to run task:\n${task}") + } + } else { + rootProject.requiredJavaVersions.getOrDefault(version, []).add(task) } - rootProject.requiredJavaVersions.get(version).add(task) } /** A convenience method for getting java home for a version of java and requiring that version for the given task to execute */ @@ -558,6 +565,12 @@ class BuildPlugin implements Plugin { repos.mavenLocal() } repos.jcenter() + repos.ivy { + url "https://artifacts.elastic.co/downloads" + patternLayout { + artifact "elasticsearch/[module]-[revision](-[classifier]).[ext]" + } + } repos.maven { name "elastic" url "https://artifacts.elastic.co/maven" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 28566662b4674..15cef3f472817 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -34,7 +34,7 @@ public class DocsTestPlugin extends RestTestPlugin { project.pluginManager.apply('elasticsearch.standalone-rest-test') super.apply(project) // The distribution can be configured with -Dtests.distribution on the command line - project.integTestCluster.distribution = System.getProperty('tests.distribution', 'zip') + project.integTestCluster.distribution = System.getProperty('tests.distribution', 'default') // Docs are published separately so no need to assemble project.tasks.assemble.enabled = false Map defaultSubstitutions = [ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 91cee78496e63..0a53787c10597 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -29,7 +29,7 @@ class ClusterConfiguration { private final Project project @Input - String distribution = 'zip' + String distribution = 'default' @Input int numNodes = 1 diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index d87e33166e74d..72041b4e628d3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -18,20 +18,17 @@ */ package org.elasticsearch.gradle.test -import java.util.stream.Collectors import org.apache.tools.ant.DefaultLogger import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties - import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.plugin.PluginPropertiesExtension import org.gradle.api.AntBuilder import org.gradle.api.DefaultTask import org.gradle.api.GradleException -import org.gradle.api.InvalidUserDataException import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.artifacts.Configuration @@ -45,7 +42,7 @@ import org.gradle.api.tasks.Exec import java.nio.charset.StandardCharsets import java.nio.file.Paths import java.util.concurrent.TimeUnit - +import java.util.stream.Collectors /** * A helper for creating tasks to build a cluster that is used by a task, and tear down the cluster when the task is finished. */ @@ -89,7 +86,7 @@ class ClusterFormationTasks { Configuration currentDistro = project.configurations.create("${prefix}_elasticsearchDistro") Configuration bwcDistro = project.configurations.create("${prefix}_elasticsearchBwcDistro") Configuration bwcPlugins = project.configurations.create("${prefix}_elasticsearchBwcPlugins") - if (System.getProperty('tests.distribution', 'oss-zip') == 'integ-test-zip') { + if (System.getProperty('tests.distribution', 'oss') == 'integ-test-zip') { throw new Exception("tests.distribution=integ-test-zip is not supported") } configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch) @@ -174,24 +171,31 @@ class ClusterFormationTasks { /** Adds a dependency on the given distribution */ static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) { - if (Version.fromString(elasticsearchVersion).before('6.3.0') && - distro.startsWith('oss-') - ) { + // TEMP HACK + // The oss docs CI build overrides the distro on the command line. This hack handles backcompat until CI is updated. + if (distro.equals('oss-zip')) { + distro = 'oss' + } + if (distro.equals('zip')) { + distro = 'default' + } + // END TEMP HACK + if (['integ-test-zip', 'oss', 'default'].contains(distro) == false) { + throw new GradleException("Unknown distribution: ${distro} in project ${project.path}") + } + Version version = Version.fromString(elasticsearchVersion) + if (version.before('6.3.0') && distro.startsWith('oss-')) { distro = distro.substring('oss-'.length()) } - String packaging = distro - if (distro.contains('tar')) { - packaging = 'tar.gz'\ - } else if (distro.contains('zip')) { - packaging = 'zip' + String group = "downloads.zip" + if (distro.equals("integ-test-zip")) { + group = "org.elasticsearch.distribution.integ-test-zip" } - String subgroup = distro String artifactName = 'elasticsearch' - if (distro.contains('oss')) { + if (distro.equals('oss') && Version.fromString(elasticsearchVersion).onOrAfter('6.3.0')) { artifactName += '-oss' - subgroup = distro.substring('oss-'.length()) } - project.dependencies.add(configuration.name, "org.elasticsearch.distribution.${subgroup}:${artifactName}:${elasticsearchVersion}@${packaging}") + project.dependencies.add(configuration.name, "${group}:${artifactName}:${elasticsearchVersion}@zip") } /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */ @@ -314,31 +318,13 @@ class ClusterFormationTasks { elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in the elasticsearch source tree then this should be the version of elasticsearch built by the source tree. If it isn't then Bad Things(TM) will happen. */ - Task extract - - switch (node.config.distribution) { - case 'integ-test-zip': - case 'zip': - case 'oss-zip': - extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { - from { - project.zipTree(configuration.singleFile) - } - into node.baseDir - } - break; - case 'tar': - case 'oss-tar': - extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { - from { - project.tarTree(project.resources.gzip(configuration.singleFile)) - } - into node.baseDir - } - break; - default: - throw new InvalidUserDataException("Unknown distribution: ${node.config.distribution}") + Task extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { + from { + project.zipTree(configuration.singleFile) + } + into node.baseDir } + return extract } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index cd99950902e55..63af1dda03c3d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -29,9 +29,6 @@ import org.gradle.api.Project import java.nio.file.Files import java.nio.file.Path import java.nio.file.Paths - -import static org.elasticsearch.gradle.BuildPlugin.getJavaHome - /** * A container for the files and configuration associated with a single node in a test cluster. */ @@ -125,8 +122,8 @@ class NodeInfo { baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}") pidFile = new File(baseDir, 'es.pid') this.nodeVersion = Version.fromString(nodeVersion) - homeDir = homeDir(baseDir, config.distribution, nodeVersion) - pathConf = pathConf(baseDir, config.distribution, nodeVersion) + homeDir = new File(baseDir, "elasticsearch-${nodeVersion}") + pathConf = new File(homeDir, 'config') if (config.dataDir != null) { dataDir = "${config.dataDir(nodeNum)}" } else { @@ -299,41 +296,4 @@ class NodeInfo { } return dataDir } - - /** Returns the directory elasticsearch home is contained in for the given distribution */ - static File homeDir(File baseDir, String distro, String nodeVersion) { - String path - switch (distro) { - case 'integ-test-zip': - case 'zip': - case 'tar': - case 'oss-zip': - case 'oss-tar': - path = "elasticsearch-${nodeVersion}" - break - case 'rpm': - case 'deb': - path = "${distro}-extracted/usr/share/elasticsearch" - break - default: - throw new InvalidUserDataException("Unknown distribution: ${distro}") - } - return new File(baseDir, path) - } - - static File pathConf(File baseDir, String distro, String nodeVersion) { - switch (distro) { - case 'integ-test-zip': - case 'zip': - case 'oss-zip': - case 'tar': - case 'oss-tar': - return new File(homeDir(baseDir, distro, nodeVersion), 'config') - case 'rpm': - case 'deb': - return new File(baseDir, "${distro}-extracted/etc/elasticsearch") - default: - throw new InvalidUserDataException("Unknown distribution: ${distro}") - } - } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy index cdadc78e9ae0e..c34b2f00f4c65 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy @@ -16,7 +16,7 @@ public class RunTask extends DefaultTask { clusterConfig.httpPort = 9200 clusterConfig.transportPort = 9300 clusterConfig.daemonize = false - clusterConfig.distribution = 'zip' + clusterConfig.distribution = 'default' project.afterEvaluate { ClusterFormationTasks.setup(project, name, this, clusterConfig) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index aabd0cf119285..dfb825afea255 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -193,10 +193,10 @@ class VagrantTestPlugin implements Plugin { UPGRADE_FROM_ARCHIVES.each { // The version of elasticsearch that we upgrade *from* project.dependencies.add(PACKAGING_CONFIGURATION, - "org.elasticsearch.distribution.${it}:elasticsearch:${upgradeFromVersion}@${it}") + "downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}") if (upgradeFromVersion.onOrAfter('6.3.0')) { project.dependencies.add(PACKAGING_CONFIGURATION, - "org.elasticsearch.distribution.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}") + "downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}") } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index fa4415bbe1e91..6cb8f2bd2f220 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -324,8 +324,9 @@ private void configure() { getConfPathSharedData().mkdirs(); getConfPathLogs().mkdirs(); LinkedHashMap config = new LinkedHashMap<>(); - config.put("cluster.name", "cluster-" + safeName(name)); - config.put("node.name", "node-" + safeName(name)); + String nodeName = safeName(name); + config.put("cluster.name",nodeName); + config.put("node.name", nodeName); config.put("path.repo", getConfPathRepo().getAbsolutePath()); config.put("path.data", getConfPathData().getAbsolutePath()); config.put("path.logs", getConfPathLogs().getAbsolutePath()); @@ -342,6 +343,9 @@ private void configure() { if (Version.fromString(version).getMajor() >= 6) { config.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b"); } + if (Version.fromString(version).getMajor() >= 7) { + config.put("cluster.initial_master_nodes", "[" + nodeName + "]"); + } try { Files.write( getConfigFile().toPath(), diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index c13bcc02cbe89..32a50fb4b0750 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -96,7 +96,7 @@ public void apply(Project project) { if (dockerComposeSupported(project) == false) { project.getLogger().warn( "Tests for {} require docker-compose at /usr/local/bin/docker-compose or /usr/bin/docker-compose " + - "but none could not be found so these will be skipped", project.getPath() + "but none could be found so these will be skipped", project.getPath() ); tasks.withType(getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask"), task -> task.setEnabled(false) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index aca9906701150..239e6d37c81e3 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -40,13 +40,15 @@ public class BuildExamplePluginsIT extends GradleIntegrationTestCase { - private static List EXAMPLE_PLUGINS = Collections.unmodifiableList( + private static final List EXAMPLE_PLUGINS = Collections.unmodifiableList( Arrays.stream( Objects.requireNonNull(System.getProperty("test.build-tools.plugin.examples")) .split(File.pathSeparator) ).map(File::new).collect(Collectors.toList()) ); + private static final String BUILD_TOOLS_VERSION = Objects.requireNonNull(System.getProperty("test.version_under_test")); + @Rule public TemporaryFolder tmpDir = new TemporaryFolder(); @@ -96,7 +98,8 @@ public void testCurrentExamplePlugin() throws IOException { private void adaptBuildScriptForTest() throws IOException { // Add the local repo as a build script URL so we can pull in build-tools and apply the plugin under test - // + is ok because we have no other repo and just want to pick up latest + // we need to specify the exact version of build-tools because gradle automatically adds its plugin portal + // which appears to mirror jcenter, opening us up to pulling a "later" version of build-tools writeBuildScript( "buildscript {\n" + " repositories {\n" + @@ -105,7 +108,7 @@ private void adaptBuildScriptForTest() throws IOException { " }\n" + " }\n" + " dependencies {\n" + - " classpath \"org.elasticsearch.gradle:build-tools:+\"\n" + + " classpath \"org.elasticsearch.gradle:build-tools:" + BUILD_TOOLS_VERSION + "\"\n" + " }\n" + "}\n" ); @@ -127,6 +130,9 @@ private void adaptBuildScriptForTest() throws IOException { " maven {\n" + " url \"" + getLocalTestRepoPath() + "\"\n" + " }\n" + + " flatDir {\n" + + " dir '" + getLocalTestDownloadsPath() + "'\n" + + " }\n" + luceneSnapshotRepo + "}\n" ); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index 3e1d0b176b011..f7a0382cec775 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -144,8 +144,16 @@ protected void assertBuildFileDoesNotExists(BuildResult result, String projectNa } protected String getLocalTestRepoPath() { - String property = System.getProperty("test.local-test-repo-path"); - Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests"); + return getLocalTestPath("test.local-test-repo-path"); + } + + protected String getLocalTestDownloadsPath() { + return getLocalTestPath("test.local-test-downloads-path"); + } + + private String getLocalTestPath(String propertyName) { + String property = System.getProperty(propertyName); + Objects.requireNonNull(property, propertyName + " not passed to tests"); File file = new File(property); assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); if (File.separator.equals("\\")) { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 514f75eaa86e9..ee366ac7b7c65 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -21,11 +21,9 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; -import org.junit.Ignore; import java.util.Arrays; -@Ignore // https://github.com/elastic/elasticsearch/issues/37218 public class TestClustersPluginIT extends GradleIntegrationTestCase { public void testListClusters() { diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 40380af0823b6..778f29686ad67 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0 -lucene = 8.0.0-snapshot-a1c6e642aa +lucene = 8.0.0-snapshot-83f9835 # optional dependencies spatial4j = 0.7 diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index d658e1f0682cd..bed7d30242801 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -37,8 +37,8 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; +import org.elasticsearch.client.indices.GetFieldMappingsRequest; +import org.elasticsearch.client.indices.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -241,11 +241,16 @@ public void getMappingAsync(GetMappingsRequest getMappingsRequest, RequestOption * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response * @throws IOException in case there is a problem sending the request or parsing back the response + * + * @deprecated This method uses an old request object which still refers to types, a deprecated feature. The method + * {@link #getFieldMapping(GetFieldMappingsRequest, RequestOptions)} should be used instead, which accepts a new request object. */ - public GetFieldMappingsResponse getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest, - RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getFieldMappingsRequest, IndicesRequestConverters::getFieldMapping, options, - GetFieldMappingsResponse::fromXContent, emptySet()); + @Deprecated + public org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse getFieldMapping( + org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest getFieldMappingsRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(getFieldMappingsRequest, IndicesRequestConverters::getFieldMapping, + options, org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse::fromXContent, emptySet()); } /** @@ -255,9 +260,45 @@ public GetFieldMappingsResponse getFieldMapping(GetFieldMappingsRequest getField * @param getFieldMappingsRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * + * @deprecated This method uses an old request object which still refers to types, a deprecated feature. The + * method {@link #getFieldMappingAsync(GetFieldMappingsRequest, RequestOptions, ActionListener)} should be used instead, + * which accepts a new request object. + */ + @Deprecated + public void getFieldMappingAsync(org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest getFieldMappingsRequest, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(getFieldMappingsRequest, IndicesRequestConverters::getFieldMapping, options, + org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse::fromXContent, listener, emptySet()); + } + + /** + * Retrieves the field mappings on an index or indices using the Get Field Mapping API. + * See + * Get Field Mapping API on elastic.co + * @param getFieldMappingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetFieldMappingsResponse getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(getFieldMappingsRequest, IndicesRequestConverters::getFieldMapping, + options, GetFieldMappingsResponse::fromXContent, emptySet() + ); + } + + /** + * Asynchronously retrieves the field mappings on an index or indices using the Get Field Mapping API. + * See + * Get Field Mapping API on elastic.co + * @param getFieldMappingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void getFieldMappingAsync(GetFieldMappingsRequest getFieldMappingsRequest, RequestOptions options, - ActionListener listener) { + public void getFieldMappingAsync(GetFieldMappingsRequest getFieldMappingsRequest, + RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(getFieldMappingsRequest, IndicesRequestConverters::getFieldMapping, options, GetFieldMappingsResponse::fromXContent, listener, emptySet()); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index 33041d5e772bc..ece8bdffa2566 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -35,7 +35,7 @@ import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; +import org.elasticsearch.client.indices.GetFieldMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -165,7 +165,28 @@ static Request getMappings(GetMappingsRequest getMappingsRequest) throws IOExcep return request; } - static Request getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest) throws IOException { + static Request getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest) { + String[] indices = getFieldMappingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.indices(); + String[] fields = getFieldMappingsRequest.fields() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.fields(); + + String endpoint = new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(indices) + .addPathPartAsIs("_mapping") + .addPathPartAsIs("field") + .addCommaSeparatedPathParts(fields) + .build(); + + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withIndicesOptions(getFieldMappingsRequest.indicesOptions()); + parameters.withIncludeDefaults(getFieldMappingsRequest.includeDefaults()); + parameters.withLocal(getFieldMappingsRequest.local()); + + return request; + } + + static Request getFieldMapping(org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest getFieldMappingsRequest) { String[] indices = getFieldMappingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.indices(); String[] types = getFieldMappingsRequest.types() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.types(); String[] fields = getFieldMappingsRequest.fields() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.fields(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetFieldMappingsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetFieldMappingsRequest.java new file mode 100644 index 0000000000000..da5fd5c2bc0ae --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetFieldMappingsRequest.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.Strings; + +/** Request the mappings of specific fields */ +public class GetFieldMappingsRequest implements Validatable { + + private boolean local = false; + + private String[] fields = Strings.EMPTY_ARRAY; + + private boolean includeDefaults = false; + + private String[] indices = Strings.EMPTY_ARRAY; + + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + /** + * Indicate whether the receiving node should operate based on local index information or forward requests, + * where needed, to other nodes. If running locally, request will not raise errors if running locally & missing indices. + */ + public GetFieldMappingsRequest local(boolean local) { + this.local = local; + return this; + } + + public boolean local() { + return local; + } + + public GetFieldMappingsRequest indices(String... indices) { + this.indices = indices; + return this; + } + + public GetFieldMappingsRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + public String[] indices() { + return indices; + } + + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + /** @param fields a list of fields to retrieve the mapping for */ + public GetFieldMappingsRequest fields(String... fields) { + this.fields = fields; + return this; + } + + public String[] fields() { + return fields; + } + + public boolean includeDefaults() { + return includeDefaults; + } + + /** Indicates whether default mapping settings should be returned */ + public GetFieldMappingsRequest includeDefaults(boolean includeDefaults) { + this.includeDefaults = includeDefaults; + return this; + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetFieldMappingsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetFieldMappingsResponse.java new file mode 100644 index 0000000000000..b982eab235485 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetFieldMappingsResponse.java @@ -0,0 +1,190 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.Mapper; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +/** Response object for {@link GetFieldMappingsRequest} API */ +public class GetFieldMappingsResponse { + + private static final ParseField MAPPINGS = new ParseField("mappings"); + + private static final ObjectParser, String> PARSER = + new ObjectParser<>(MAPPINGS.getPreferredName(), true, HashMap::new); + + static { + PARSER.declareField((p, fieldMappings, index) -> { + p.nextToken(); + while (p.currentToken() == XContentParser.Token.FIELD_NAME) { + final String fieldName = p.currentName(); + final FieldMappingMetaData fieldMappingMetaData = FieldMappingMetaData.fromXContent(p); + fieldMappings.put(fieldName, fieldMappingMetaData); + p.nextToken(); + } + }, MAPPINGS, ObjectParser.ValueType.OBJECT); + } + + private Map> mappings; + + GetFieldMappingsResponse(Map> mappings) { + this.mappings = mappings; + } + + + /** + * Returns the fields mapping. The return map keys are indexes and fields (as specified in the request). + */ + public Map> mappings() { + return mappings; + } + + /** + * Returns the mappings of a specific index and field. + * + * @param field field name as specified in the {@link GetFieldMappingsRequest} + * @return FieldMappingMetaData for the requested field or null if not found. + */ + public FieldMappingMetaData fieldMappings(String index, String field) { + Map indexMapping = mappings.get(index); + if (indexMapping == null) { + return null; + } + return indexMapping.get(field); + } + + + public static GetFieldMappingsResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + final Map> mappings = new HashMap<>(); + if (parser.nextToken() == XContentParser.Token.FIELD_NAME) { + while (parser.currentToken() == XContentParser.Token.FIELD_NAME) { + final String index = parser.currentName(); + final Map fieldMappings = PARSER.parse(parser, index); + mappings.put(index, fieldMappings); + parser.nextToken(); + } + } + return new GetFieldMappingsResponse(mappings); + } + + public static class FieldMappingMetaData { + private static final ParseField FULL_NAME = new ParseField("full_name"); + private static final ParseField MAPPING = new ParseField("mapping"); + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("field_mapping_meta_data", true, + a -> new FieldMappingMetaData((String)a[0], (BytesReference)a[1]) + ); + + static { + PARSER.declareField(optionalConstructorArg(), + (p, c) -> p.text(), FULL_NAME, ObjectParser.ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), + (p, c) -> { + final XContentBuilder jsonBuilder = jsonBuilder().copyCurrentStructure(p); + final BytesReference bytes = BytesReference.bytes(jsonBuilder); + return bytes; + }, MAPPING, ObjectParser.ValueType.OBJECT); + } + + private String fullName; + private BytesReference source; + + public FieldMappingMetaData(String fullName, BytesReference source) { + this.fullName = fullName; + this.source = source; + } + + public String fullName() { + return fullName; + } + + /** + * Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. + */ + public Map sourceAsMap() { + return XContentHelper.convertToMap(source, true, XContentType.JSON).v2(); + } + + //pkg-private for testing + BytesReference getSource() { + return source; + } + + public static FieldMappingMetaData fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public String toString() { + return "FieldMappingMetaData{fullName='" + fullName + '\'' + ", source=" + source + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof FieldMappingMetaData)) return false; + FieldMappingMetaData that = (FieldMappingMetaData) o; + return Objects.equals(fullName, that.fullName) && Objects.equals(source, that.source); + } + + @Override + public int hashCode() { + return Objects.hash(fullName, source); + } + } + + + @Override + public String toString() { + return "GetFieldMappingsResponse{" + "mappings=" + mappings + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof GetFieldMappingsResponse)) return false; + GetFieldMappingsResponse that = (GetFieldMappingsResponse) o; + return Objects.equals(mappings, that.mappings); + } + + @Override + public int hashCode() { + return Objects.hash(mappings); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index af8a51b4900d8..72bc366e7a9c9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -43,8 +43,8 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; +import org.elasticsearch.client.indices.GetFieldMappingsRequest; +import org.elasticsearch.client.indices.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -93,6 +93,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.elasticsearch.rest.action.admin.indices.RestPutMappingAction; import java.io.IOException; @@ -500,7 +501,6 @@ public void testGetFieldMapping() throws IOException { GetFieldMappingsRequest getFieldMappingsRequest = new GetFieldMappingsRequest() .indices(indexName) - .types("_doc") .fields("field"); GetFieldMappingsResponse getFieldMappingsResponse = @@ -509,7 +509,7 @@ public void testGetFieldMapping() throws IOException { highLevelClient().indices()::getFieldMappingAsync); final Map fieldMappingMap = - getFieldMappingsResponse.mappings().get(indexName).get("_doc"); + getFieldMappingsResponse.mappings().get(indexName); final GetFieldMappingsResponse.FieldMappingMetaData metaData = new GetFieldMappingsResponse.FieldMappingMetaData("field", @@ -517,6 +517,42 @@ public void testGetFieldMapping() throws IOException { assertThat(fieldMappingMap, equalTo(Collections.singletonMap("field", metaData))); } + public void testGetFieldMappingWithTypes() throws IOException { + String indexName = "test"; + createIndex(indexName, Settings.EMPTY); + + PutMappingRequest putMappingRequest = new PutMappingRequest(indexName); + XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); + mappingBuilder.startObject().startObject("properties").startObject("field"); + mappingBuilder.field("type", "text"); + mappingBuilder.endObject().endObject().endObject(); + putMappingRequest.source(mappingBuilder); + + AcknowledgedResponse putMappingResponse = + execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync); + assertTrue(putMappingResponse.isAcknowledged()); + + org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest getFieldMappingsRequest = + new org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest() + .indices(indexName) + .types("_doc") + .fields("field"); + + org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse getFieldMappingsResponse = + execute(getFieldMappingsRequest, + highLevelClient().indices()::getFieldMapping, + highLevelClient().indices()::getFieldMappingAsync, + expectWarnings(RestGetFieldMappingAction.TYPES_DEPRECATION_MESSAGE)); + + final Map + fieldMappingMap = getFieldMappingsResponse.mappings().get(indexName).get("_doc"); + + final org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData metaData = + new org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData("field", + new BytesArray("{\"field\":{\"type\":\"text\"}}")); + assertThat(fieldMappingMap, equalTo(Collections.singletonMap("field", metaData))); + } + public void testDeleteIndex() throws IOException { { // Delete index if exists @@ -1177,7 +1213,7 @@ public void testIndexPutSettings() throws IOException { createIndex(index, Settings.EMPTY); assertThat(dynamicSetting.getDefault(Settings.EMPTY), not(dynamicSettingValue)); - UpdateSettingsRequest dynamicSettingRequest = new UpdateSettingsRequest(); + UpdateSettingsRequest dynamicSettingRequest = new UpdateSettingsRequest(index); dynamicSettingRequest.settings(Settings.builder().put(dynamicSettingKey, dynamicSettingValue).build()); AcknowledgedResponse response = execute(dynamicSettingRequest, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync); @@ -1187,7 +1223,7 @@ public void testIndexPutSettings() throws IOException { assertThat(indexSettingsAsMap.get(dynamicSettingKey), equalTo(String.valueOf(dynamicSettingValue))); assertThat(staticSetting.getDefault(Settings.EMPTY), not(staticSettingValue)); - UpdateSettingsRequest staticSettingRequest = new UpdateSettingsRequest(); + UpdateSettingsRequest staticSettingRequest = new UpdateSettingsRequest(index); staticSettingRequest.settings(Settings.builder().put(staticSettingKey, staticSettingValue).build()); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(staticSettingRequest, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); @@ -1207,7 +1243,7 @@ public void testIndexPutSettings() throws IOException { assertThat(indexSettingsAsMap.get(staticSettingKey), equalTo(staticSettingValue)); assertThat(unmodifiableSetting.getDefault(Settings.EMPTY), not(unmodifiableSettingValue)); - UpdateSettingsRequest unmodifiableSettingRequest = new UpdateSettingsRequest(); + UpdateSettingsRequest unmodifiableSettingRequest = new UpdateSettingsRequest(index); unmodifiableSettingRequest.settings(Settings.builder().put(unmodifiableSettingKey, unmodifiableSettingValue).build()); exception = expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java index 3410792e56132..3e4dcd0209764 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java @@ -38,7 +38,6 @@ import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -54,6 +53,7 @@ import org.elasticsearch.client.indices.GetIndexTemplatesRequest; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutMappingRequest; +import org.elasticsearch.client.indices.GetFieldMappingsRequest; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -258,7 +258,7 @@ public void testGetMapping() throws IOException { Assert.assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); } - public void testGetFieldMapping() throws IOException { + public void testGetFieldMapping() { GetFieldMappingsRequest getFieldMappingsRequest = new GetFieldMappingsRequest(); String[] indices = Strings.EMPTY_ARRAY; @@ -269,6 +269,50 @@ public void testGetFieldMapping() throws IOException { getFieldMappingsRequest.indices((String[]) null); } + String[] fields = null; + if (ESTestCase.randomBoolean()) { + fields = new String[ESTestCase.randomIntBetween(1, 5)]; + for (int i = 0; i < fields.length; i++) { + fields[i] = ESTestCase.randomAlphaOfLengthBetween(3, 10); + } + getFieldMappingsRequest.fields(fields); + } else if (ESTestCase.randomBoolean()) { + getFieldMappingsRequest.fields((String[]) null); + } + + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomIndicesOptions(getFieldMappingsRequest::indicesOptions, getFieldMappingsRequest::indicesOptions, + expectedParams); + RequestConvertersTests.setRandomLocal(getFieldMappingsRequest::local, expectedParams); + + Request request = IndicesRequestConverters.getFieldMapping(getFieldMappingsRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_mapping"); + endpoint.add("field"); + if (fields != null) { + endpoint.add(String.join(",", fields)); + } + Assert.assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + Assert.assertThat(expectedParams, equalTo(request.getParameters())); + Assert.assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); + } + + public void testGetFieldMappingWithTypes() { + org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest getFieldMappingsRequest = + new org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest(); + + String[] indices = Strings.EMPTY_ARRAY; + if (ESTestCase.randomBoolean()) { + indices = RequestConvertersTests.randomIndicesNames(0, 5); + getFieldMappingsRequest.indices(indices); + } else if (ESTestCase.randomBoolean()) { + getFieldMappingsRequest.indices((String[]) null); + } + String type = null; if (ESTestCase.randomBoolean()) { type = ESTestCase.randomAlphaOfLengthBetween(3, 10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java index b05c7a0dde368..1f6373aff6a8c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java @@ -630,6 +630,22 @@ public void onFailure(Exception e) { public void testGetFollowStats() throws Exception { RestHighLevelClient client = highLevelClient(); + { + // Create leader index: + CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader"); + createIndexRequest.settings(Collections.singletonMap("index.soft_deletes.enabled", true)); + CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertThat(response.isAcknowledged(), is(true)); + } + { + // Follow index, so that we can query for follow stats: + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", "follower"); + PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); + assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); + assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); + assertThat(putFollowResponse.isIndexFollowingStarted(), is(true)); + } + // tag::ccr-get-follow-stats-request FollowStatsRequest request = new FollowStatsRequest("follower"); // <1> @@ -671,6 +687,12 @@ public void onFailure(Exception e) { // end::ccr-get-follow-stats-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + { + PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower"); + AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT); + assertThat(pauseFollowResponse.isAcknowledged(), is(true)); + } } static Map toMap(Response response) throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 9e13f2cb34d92..ef649dcfc48f9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -42,8 +42,8 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; +import org.elasticsearch.client.indices.GetFieldMappingsRequest; +import org.elasticsearch.client.indices.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -725,8 +725,7 @@ public void testGetFieldMapping() throws IOException, InterruptedException { // tag::get-field-mappings-request GetFieldMappingsRequest request = new GetFieldMappingsRequest(); // <1> request.indices("twitter"); // <2> - request.types("_doc"); // <3> - request.fields("message", "timestamp"); // <4> + request.fields("message", "timestamp"); // <3> // end::get-field-mappings-request // tag::get-field-mappings-request-indicesOptions @@ -745,12 +744,12 @@ public void testGetFieldMapping() throws IOException, InterruptedException { // end::get-field-mappings-execute // tag::get-field-mappings-response - final Map>> mappings = + final Map> mappings = response.mappings();// <1> - final Map typeMappings = - mappings.get("twitter").get("_doc"); // <2> + final Map fieldMappings = + mappings.get("twitter"); // <2> final GetFieldMappingsResponse.FieldMappingMetaData metaData = - typeMappings.get("message");// <3> + fieldMappings.get("message");// <3> final String fullName = metaData.fullName();// <4> final Map source = metaData.sourceAsMap(); // <5> @@ -777,11 +776,11 @@ public void onFailure(Exception e) { final CountDownLatch latch = new CountDownLatch(1); final ActionListener latchListener = new LatchedActionListener<>(listener, latch); listener = ActionListener.wrap(r -> { - final Map>> mappings = + final Map> mappings = r.mappings(); - final Map typeMappings = - mappings.get("twitter").get("_doc"); - final GetFieldMappingsResponse.FieldMappingMetaData metaData1 = typeMappings.get("message"); + final Map fieldMappings = + mappings.get("twitter"); + final GetFieldMappingsResponse.FieldMappingMetaData metaData1 = fieldMappings.get("message"); final String fullName = metaData1.fullName(); final Map source = metaData1.sourceAsMap(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetFieldMappingsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetFieldMappingsResponseTests.java new file mode 100644 index 0000000000000..aa8ce3bb6c098 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetFieldMappingsResponseTests.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.client.indices.GetFieldMappingsResponse.FieldMappingMetaData; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class GetFieldMappingsResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + GetFieldMappingsResponseTests::createTestInstance, + GetFieldMappingsResponseTests::toXContent, + GetFieldMappingsResponse::fromXContent) + .supportsUnknownFields(true) + .randomFieldsExcludeFilter(getRandomFieldsExcludeFilter()) + .test(); + } + + Predicate getRandomFieldsExcludeFilter() { + // allow random fields at the level of `index` and `index.mappings.field` + // otherwise random field could be evaluated as index name or type name + return s -> false == (s.matches("(?[^.]+)") + || s.matches("(?[^.]+)\\.mappings\\.(?[^.]+)")); + } + + static GetFieldMappingsResponse createTestInstance() { + Map> mappings = new HashMap<>(); + // if mappings is empty, means that fields are not found + if (randomBoolean()) { + int indices = randomInt(10); + for (int i = 0; i < indices; i++) { + Map fieldMappings = new HashMap<>(); + int fields = randomInt(10); + for (int k = 0; k < fields; k++) { + final String mapping = randomBoolean() ? "{\"type\":\"string\"}" : "{\"type\":\"keyword\"}"; + final String fieldName = randomAlphaOfLength(8); + FieldMappingMetaData metaData = new FieldMappingMetaData(fieldName, new BytesArray(mapping)); + fieldMappings.put(fieldName, metaData); + } + mappings.put(randomAlphaOfLength(8), fieldMappings); + } + } + return new GetFieldMappingsResponse(mappings); + } + + // As the client class GetFieldMappingsResponse doesn't have toXContent method, adding this method here only for the test + static void toXContent(GetFieldMappingsResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + for (Map.Entry> indexEntry : response.mappings().entrySet()) { + builder.startObject(indexEntry.getKey()); + builder.startObject("mappings"); + Map mappings = null; + for (Map.Entry fieldEntry : indexEntry.getValue().entrySet()) { + builder.startObject(fieldEntry.getKey()); + builder.field("full_name", fieldEntry.getValue().fullName()); + builder.field("mapping", fieldEntry.getValue().sourceAsMap()); + builder.endObject(); + } + builder.endObject(); + builder.endObject(); + } + builder.endObject(); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java index 40b38e40b2647..50224aa1b9ad7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; +import java.util.Map; public class PutMappingRequestTests extends AbstractXContentTestCase { @@ -45,7 +46,10 @@ protected PutMappingRequest createTestInstance() { @Override protected PutMappingRequest doParseInstance(XContentParser parser) throws IOException { PutMappingRequest request = new PutMappingRequest(); - request.source(parser.map()); + Map map = parser.map(); + if (map.isEmpty() == false) { + request.source(map); + } return request; } @@ -56,11 +60,16 @@ protected boolean supportsUnknownFields() { @Override protected void assertEqualInstances(PutMappingRequest expected, PutMappingRequest actual) { - try (XContentParser expectedJson = createParser(expected.xContentType().xContent(), expected.source()); - XContentParser actualJson = createParser(actual.xContentType().xContent(), actual.source())) { - assertEquals(expectedJson.mapOrdered(), actualJson.mapOrdered()); - } catch (IOException e) { - throw new RuntimeException(e); + if (actual.source() != null) { + try (XContentParser expectedJson = createParser(expected.xContentType().xContent(), expected.source()); + XContentParser actualJson = createParser(actual.xContentType().xContent(), actual.source())) { + assertEquals(expectedJson.mapOrdered(), actualJson.mapOrdered()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } else { + // if the original `source` is null, the parsed source should be so too + assertNull(expected.source()); } } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java index 4115ef36a5cb4..e267a68bcbd91 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Response.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java @@ -103,7 +103,7 @@ public HttpEntity getEntity() { private static final Pattern WARNING_HEADER_PATTERN = Pattern.compile( "299 " + // warn code "Elasticsearch-\\d+\\.\\d+\\.\\d+(?:-(?:alpha|beta|rc)\\d+)?(?:-SNAPSHOT)?-(?:[a-f0-9]{7}|Unknown) " + // warn agent - "\"((?:\t| |!|[\\x23-\\x5B]|[\\x5D-\\x7E]|[\\x80-\\xFF]|\\\\|\\\\\")*)\" " + // quoted warning value, captured + "\"((?:\t| |!|[\\x23-\\x5B]|[\\x5D-\\x7E]|[\\x80-\\xFF]|\\\\|\\\\\")*)\"( " + // quoted warning value, captured // quoted RFC 1123 date format "\"" + // opening quote "(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // weekday @@ -112,7 +112,7 @@ public HttpEntity getEntity() { "\\d{4} " + // 4-digit year "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second) "GMT" + // GMT - "\""); // closing quote + "\")?"); // closing quote (optional, since an older version can still send a warn-date) /** * Returns a list of all warning headers returned in the response. diff --git a/distribution/build.gradle b/distribution/build.gradle index 0e439b3586bc3..3cf4a170700df 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -337,8 +337,8 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } task run(type: RunTask) { - distribution = System.getProperty('run.distribution', 'zip') - if (distribution == 'zip') { + distribution = System.getProperty('run.distribution', 'default') + if (distribution == 'default') { String licenseType = System.getProperty("run.license_type", "basic") if (licenseType == 'trial') { setting 'xpack.ml.enabled', 'true' diff --git a/docs/build.gradle b/docs/build.gradle index 91086ce1f346d..ada204949febd 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -36,7 +36,7 @@ buildRestTests.expectedUnconvertedCandidates = [ ] integTestCluster { - if ("zip".equals(integTestCluster.distribution)) { + if ("default".equals(integTestCluster.distribution)) { setting 'xpack.license.self_generated.type', 'trial' } diff --git a/docs/java-rest/high-level/indices/get_field_mappings.asciidoc b/docs/java-rest/high-level/indices/get_field_mappings.asciidoc index 0e965f8b6a3b1..2186c1b1ed4e6 100644 --- a/docs/java-rest/high-level/indices/get_field_mappings.asciidoc +++ b/docs/java-rest/high-level/indices/get_field_mappings.asciidoc @@ -18,8 +18,7 @@ include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- <1> An empty request <2> Setting the indices to fetch mapping for -<3> The types to be returned -<4> The fields to be returned +<3> The fields to be returned ==== Optional arguments The following arguments can also optionally be provided: @@ -53,7 +52,7 @@ executed operation as follows: include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- <1> Returning all requested indices fields' mappings -<2> Retrieving the mappings for a particular index and type +<2> Retrieving the mappings for a particular index <3> Getting the mappings metadata for the `message` field <4> Getting the full name of the field <5> Getting the mapping source of the field diff --git a/docs/reference/aggregations/bucket/adjacency-matrix-aggregation.asciidoc b/docs/reference/aggregations/bucket/adjacency-matrix-aggregation.asciidoc index 1806f05d2c686..5cd67877460bf 100644 --- a/docs/reference/aggregations/bucket/adjacency-matrix-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/adjacency-matrix-aggregation.asciidoc @@ -30,7 +30,7 @@ Example: [source,js] -------------------------------------------------- -PUT /emails/_doc/_bulk?refresh +PUT /emails/_bulk?refresh { "index" : { "_id" : 1 } } { "accounts" : ["hillary", "sidney"]} { "index" : { "_id" : 2 } } diff --git a/docs/reference/aggregations/bucket/filters-aggregation.asciidoc b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc index b7e3b1edf10d2..94b91654f0c7f 100644 --- a/docs/reference/aggregations/bucket/filters-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc @@ -9,7 +9,7 @@ Example: [source,js] -------------------------------------------------- -PUT /logs/_doc/_bulk?refresh +PUT /logs/_bulk?refresh { "index" : { "_id" : 1 } } { "body" : "warning: page could not be rendered" } { "index" : { "_id" : 2 } } diff --git a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc index 0ccd58e0dc63c..af3274c1c09fe 100644 --- a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc @@ -7,20 +7,18 @@ Example: [source,js] -------------------------------------------------- -PUT /museums?include_type_name=true +PUT /museums { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "location": { + "type": "geo_point" } } } } -POST /museums/_doc/_bulk?refresh +POST /museums/_bulk?refresh {"index":{"_id":1}} {"location": "52.374081,4.912350", "city": "Amsterdam", "name": "NEMO Science Museum"} {"index":{"_id":2}} diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index 69c9d50690196..1f2ec113d31fd 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -147,7 +147,7 @@ Imagine a situation where you index the following documents into an index with 2 [source,js] -------------------------------------------------- -PUT /transactions/_doc/_bulk?refresh +PUT /transactions/_bulk?refresh {"index":{"_id":1}} {"type": "sale","amount": 80} {"index":{"_id":2}} diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc index c3b6a9bad4cfc..1ea38fec9657b 100644 --- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc @@ -25,6 +25,7 @@ The top_hits aggregation returns regular search hits, because of this many per h * <> * <> * <> +* <> ==== Example diff --git a/docs/reference/analysis/analyzers/configuring.asciidoc b/docs/reference/analysis/analyzers/configuring.asciidoc index f010f2ad6e9b1..994842508e8f5 100644 --- a/docs/reference/analysis/analyzers/configuring.asciidoc +++ b/docs/reference/analysis/analyzers/configuring.asciidoc @@ -8,7 +8,7 @@ to support a list of stop words: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -21,16 +21,14 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "my_text": { - "type": "text", - "analyzer": "standard", <2> - "fields": { - "english": { - "type": "text", - "analyzer": "std_english" <3> - } + "properties": { + "my_text": { + "type": "text", + "analyzer": "standard", <2> + "fields": { + "english": { + "type": "text", + "analyzer": "std_english" <3> } } } diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index adfde27138af8..7386af902fbcc 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -49,7 +49,7 @@ replace any embedded dashes in numbers with underscores, i.e `123-456-789` -> [source,js] ---------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -100,7 +100,7 @@ camelCase words to be queried individually: [source,js] ---------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -125,12 +125,10 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "analyzer": "my_analyzer" - } + "properties": { + "text": { + "type": "text", + "analyzer": "my_analyzer" } } } diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc index 14f94ff21d3dc..2e3ae8b036c3f 100644 --- a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc @@ -104,7 +104,7 @@ length `10`: [source,js] ---------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -222,7 +222,7 @@ Below is an example of how to set up a field for _search-as-you-type_: [source,js] ----------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -250,13 +250,11 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "analyzer": "autocomplete", - "search_analyzer": "autocomplete_search" - } + "properties": { + "title": { + "type": "text", + "analyzer": "autocomplete", + "search_analyzer": "autocomplete_search" } } } diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index be41c0fdc77e5..96a01bbeb5d9e 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -85,6 +85,9 @@ Where: `date_format`:: is the optional format in which the computed date should be rendered. Defaults to `YYYY.MM.dd`. `time_zone`:: is the optional time zone . Defaults to `utc`. +Date math expressions are resolved locale-independent. Consequently, it is not possible to use any other +calendars than the Gregorian calendar. + You must enclose date math index name expressions within angle brackets, and all special characters should be URI encoded. For example: diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 0aae2365d965e..16e93ac196c8d 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -57,7 +57,7 @@ newlines. Example: [source,js] -------------------------------------------------- $ cat requests -{ "index" : { "_index" : "test", "_type" : "_doc", "_id" : "1" } } +{ "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_type":"_doc","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} @@ -72,12 +72,12 @@ example of a correct sequence of bulk commands: [source,js] -------------------------------------------------- POST _bulk -{ "index" : { "_index" : "test", "_type" : "_doc", "_id" : "1" } } +{ "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } -{ "delete" : { "_index" : "test", "_type" : "_doc", "_id" : "2" } } -{ "create" : { "_index" : "test", "_type" : "_doc", "_id" : "3" } } +{ "delete" : { "_index" : "test", "_id" : "2" } } +{ "create" : { "_index" : "test", "_id" : "3" } } { "field1" : "value3" } -{ "update" : {"_id" : "1", "_type" : "_doc", "_index" : "test"} } +{ "update" : {"_id" : "1", "_index" : "test"} } { "doc" : {"field2" : "value2"} } -------------------------------------------------- // CONSOLE @@ -265,15 +265,15 @@ the options. Example with update actions: [source,js] -------------------------------------------------- POST _bulk -{ "update" : {"_id" : "1", "_type" : "_doc", "_index" : "index1", "retry_on_conflict" : 3} } +{ "update" : {"_id" : "1", "_index" : "index1", "retry_on_conflict" : 3} } { "doc" : {"field" : "value"} } -{ "update" : { "_id" : "0", "_type" : "_doc", "_index" : "index1", "retry_on_conflict" : 3} } +{ "update" : { "_id" : "0", "_index" : "index1", "retry_on_conflict" : 3} } { "script" : { "source": "ctx._source.counter += params.param1", "lang" : "painless", "params" : {"param1" : 1}}, "upsert" : {"counter" : 1}} -{ "update" : {"_id" : "2", "_type" : "_doc", "_index" : "index1", "retry_on_conflict" : 3} } +{ "update" : {"_id" : "2", "_index" : "index1", "retry_on_conflict" : 3} } { "doc" : {"field" : "value"}, "doc_as_upsert" : true } -{ "update" : {"_id" : "3", "_type" : "_doc", "_index" : "index1", "_source" : true} } +{ "update" : {"_id" : "3", "_index" : "index1", "_source" : true} } { "doc" : {"field" : "value"} } -{ "update" : {"_id" : "4", "_type" : "_doc", "_index" : "index1"} } +{ "update" : {"_id" : "4", "_index" : "index1"} } { "doc" : {"field" : "value"}, "_source": true} -------------------------------------------------- // CONSOLE diff --git a/docs/reference/docs/concurrency-control.asciidoc b/docs/reference/docs/concurrency-control.asciidoc index e695e6b5127c9..780a9c7cf76fc 100644 --- a/docs/reference/docs/concurrency-control.asciidoc +++ b/docs/reference/docs/concurrency-control.asciidoc @@ -87,7 +87,7 @@ returns: Note: The <> can return the `_seq_no` and `_primary_term` -for each search hit by requesting the `_seq_no` and `_primary_term` <>. +for each search hit by setting <>. The sequence number and the primary term uniquely identify a change. By noting down the sequence number and primary term returned, you can make sure to only change the diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc index d6227b3c36202..1e2748916005c 100644 --- a/docs/reference/docs/termvectors.asciidoc +++ b/docs/reference/docs/termvectors.asciidoc @@ -125,21 +125,19 @@ First, we create an index that stores term vectors, payloads etc. : [source,js] -------------------------------------------------- -PUT /twitter?include_type_name=true +PUT /twitter { "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "term_vector": "with_positions_offsets_payloads", - "store" : true, - "analyzer" : "fulltext_analyzer" - }, - "fullname": { - "type": "text", - "term_vector": "with_positions_offsets_payloads", - "analyzer" : "fulltext_analyzer" - } + "properties": { + "text": { + "type": "text", + "term_vector": "with_positions_offsets_payloads", + "store" : true, + "analyzer" : "fulltext_analyzer" + }, + "fullname": { + "type": "text", + "term_vector": "with_positions_offsets_payloads", + "analyzer" : "fulltext_analyzer" } } }, diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index b79dd5c36c244..d32eeaff8c719 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -633,7 +633,7 @@ As a quick example, the following call indexes two documents (ID 1 - John Doe an [source,js] -------------------------------------------------- -POST /customer/_doc/_bulk?pretty +POST /customer/_bulk?pretty {"index":{"_id":"1"}} {"name": "John Doe" } {"index":{"_id":"2"}} @@ -645,7 +645,7 @@ This example updates the first document (ID of 1) and then deletes the second do [source,sh] -------------------------------------------------- -POST /customer/_doc/_bulk?pretty +POST /customer/_bulk?pretty {"update":{"_id":"1"}} {"doc": { "name": "John Doe becomes Jane Doe" } } {"delete":{"_id":"2"}} @@ -692,7 +692,7 @@ You can download the sample dataset (accounts.json) from https://github.com/elas [source,sh] -------------------------------------------------- -curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_doc/_bulk?pretty&refresh" --data-binary "@accounts.json" +curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_bulk?pretty&refresh" --data-binary "@accounts.json" curl "localhost:9200/_cat/indices?v" -------------------------------------------------- // NOTCONSOLE diff --git a/docs/reference/how-to/disk-usage.asciidoc b/docs/reference/how-to/disk-usage.asciidoc index d58233a7e01cb..713e03d188ad9 100644 --- a/docs/reference/how-to/disk-usage.asciidoc +++ b/docs/reference/how-to/disk-usage.asciidoc @@ -12,15 +12,13 @@ filter on, you can safely disable indexing on this field in your [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "integer", - "index": false - } + "properties": { + "foo": { + "type": "integer", + "index": false } } } @@ -35,15 +33,13 @@ to not write norms to the index: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "text", - "norms": false - } + "properties": { + "foo": { + "type": "text", + "norms": false } } } @@ -58,15 +54,13 @@ Elasticsearch to not index positions: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "text", - "index_options": "freqs" - } + "properties": { + "foo": { + "type": "text", + "index_options": "freqs" } } } @@ -81,16 +75,14 @@ and scoring will assume that terms appear only once in every document. [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "text", - "norms": false, - "index_options": "freqs" - } + "properties": { + "foo": { + "type": "text", + "norms": false, + "index_options": "freqs" } } } @@ -115,21 +107,19 @@ fields as `keyword`: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings": { - "match_mapping_type": "string", - "mapping": { - "type": "keyword" - } + "dynamic_templates": [ + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "keyword" } } - ] - } + } + ] } } -------------------------------------------------- diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index c0ec3f2a04c09..e8c213646578c 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -9,7 +9,7 @@ content indexed in two different ways: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "settings": { "analysis": { @@ -24,16 +24,14 @@ PUT index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "body": { - "type": "text", - "analyzer": "english", - "fields": { - "exact": { - "type": "text", - "analyzer": "english_exact" - } + "properties": { + "body": { + "type": "text", + "analyzer": "english", + "fields": { + "exact": { + "type": "text", + "analyzer": "english_exact" } } } diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index 4ab408be935ac..b136c953b8f02 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -50,22 +50,20 @@ field. [source,js] -------------------------------------------------- -PUT movies?include_type_name=true +PUT movies { "mappings": { - "_doc": { - "properties": { - "name_and_plot": { - "type": "text" - }, - "name": { - "type": "text", - "copy_to": "name_and_plot" - }, - "plot": { - "type": "text", - "copy_to": "name_and_plot" - } + "properties": { + "name_and_plot": { + "type": "text" + }, + "name": { + "type": "text", + "copy_to": "name_and_plot" + }, + "plot": { + "type": "text", + "copy_to": "name_and_plot" } } } @@ -123,14 +121,12 @@ should be mapped as a <>: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "price_range": { - "type": "keyword" - } + "properties": { + "price_range": { + "type": "keyword" } } } @@ -322,15 +318,13 @@ eagerly at refresh-time by configuring mappings as described below: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "keyword", - "eager_global_ordinals": true - } + "properties": { + "foo": { + "type": "keyword", + "eager_global_ordinals": true } } } diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index 9bdf14d970caa..a36703967af67 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -69,7 +69,7 @@ PUT _ilm/policy/my_policy [source,js] -------------------------------------------------- -GET _ilm/policy +GET _ilm/policy/my_policy -------------------------------------------------- // CONSOLE // TEST[continued] diff --git a/docs/reference/ilm/update-lifecycle-policy.asciidoc b/docs/reference/ilm/update-lifecycle-policy.asciidoc index 3e6627fdd3a7e..bc41da6bdff63 100644 --- a/docs/reference/ilm/update-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/update-lifecycle-policy.asciidoc @@ -87,7 +87,7 @@ PUT _ilm/policy/my_policy ////////// [source,js] -------------------------------------------------- -GET _ilm/policy +GET _ilm/policy/my_policy -------------------------------------------------- // CONSOLE // TEST[continued] diff --git a/docs/reference/index-modules/index-sorting.asciidoc b/docs/reference/index-modules/index-sorting.asciidoc index 1a0f8c65dc98b..b4648dd256d3c 100644 --- a/docs/reference/index-modules/index-sorting.asciidoc +++ b/docs/reference/index-modules/index-sorting.asciidoc @@ -14,7 +14,7 @@ For instance the following example shows how to define a sort on a single field: [source,js] -------------------------------------------------- -PUT twitter?include_type_name=true +PUT twitter { "settings" : { "index" : { @@ -23,11 +23,9 @@ PUT twitter?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date" - } + "properties": { + "date": { + "type": "date" } } } @@ -42,7 +40,7 @@ It is also possible to sort the index by more than one field: [source,js] -------------------------------------------------- -PUT twitter?include_type_name=true +PUT twitter { "settings" : { "index" : { @@ -51,15 +49,13 @@ PUT twitter?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "username": { - "type": "keyword", - "doc_values": true - }, - "date": { - "type": "date" - } + "properties": { + "username": { + "type": "keyword", + "doc_values": true + }, + "date": { + "type": "date" } } } @@ -118,7 +114,7 @@ For example, let's say we have an index that contains events sorted by a timesta [source,js] -------------------------------------------------- -PUT events?include_type_name=true +PUT events { "settings" : { "index" : { @@ -127,11 +123,9 @@ PUT events?include_type_name=true } }, "mappings": { - "doc": { - "properties": { - "timestamp": { - "type": "date" - } + "properties": { + "timestamp": { + "type": "date" } } } diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index 4d2404c3b52b0..014923d463cbd 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -20,7 +20,7 @@ settings. [source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings" : { "index" : { @@ -200,7 +200,7 @@ TF-IDF: [source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings": { "number_of_shards": 1, @@ -214,12 +214,10 @@ PUT /index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "field": { - "type": "text", - "similarity": "scripted_tfidf" - } + "properties": { + "field": { + "type": "text", + "similarity": "scripted_tfidf" } } } @@ -369,7 +367,7 @@ more efficient: [source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings": { "number_of_shards": 1, @@ -386,12 +384,10 @@ PUT /index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "field": { - "type": "text", - "similarity": "scripted_tfidf" - } + "properties": { + "field": { + "type": "text", + "similarity": "scripted_tfidf" } } } @@ -537,7 +533,7 @@ it is <>: [source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings": { "index": { diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index f9f902395c4b2..f38e62806bb9d 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -196,15 +196,13 @@ You can use the following mapping for the example index above: [source,js] -------------------------------------------------- -PUT my_ip_locations?include_type_name=true +PUT my_ip_locations { "mappings": { - "_doc": { - "properties": { - "geoip": { - "properties": { - "location": { "type": "geo_point" } - } + "properties": { + "geoip": { + "properties": { + "location": { "type": "geo_point" } } } } diff --git a/docs/reference/mapping/dynamic/field-mapping.asciidoc b/docs/reference/mapping/dynamic/field-mapping.asciidoc index c7a96a33473e5..735fddf3f84ab 100644 --- a/docs/reference/mapping/dynamic/field-mapping.asciidoc +++ b/docs/reference/mapping/dynamic/field-mapping.asciidoc @@ -66,12 +66,10 @@ Dynamic date detection can be disabled by setting `date_detection` to `false`: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "date_detection": false - } + "date_detection": false } } @@ -91,12 +89,10 @@ own <>: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_date_formats": ["MM/dd/yyyy"] - } + "dynamic_date_formats": ["MM/dd/yyyy"] } } @@ -119,12 +115,10 @@ correct solution is to map these fields explicitly, but numeric detection [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "numeric_detection": true - } + "numeric_detection": true } } diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index f10a9d3475b66..8598eab412e79 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -69,35 +69,33 @@ could use the following template: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "integers": { - "match_mapping_type": "long", - "mapping": { - "type": "integer" - } + "dynamic_templates": [ + { + "integers": { + "match_mapping_type": "long", + "mapping": { + "type": "integer" } - }, - { - "strings": { - "match_mapping_type": "string", - "mapping": { - "type": "text", - "fields": { - "raw": { - "type": "keyword", - "ignore_above": 256 - } + } + }, + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "text", + "fields": { + "raw": { + "type": "keyword", + "ignore_above": 256 } } } } - ] - } + } + ] } } @@ -125,23 +123,21 @@ fields: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "longs_as_strings": { - "match_mapping_type": "string", - "match": "long_*", - "unmatch": "*_text", - "mapping": { - "type": "long" - } + "dynamic_templates": [ + { + "longs_as_strings": { + "match_mapping_type": "string", + "match": "long_*", + "unmatch": "*_text", + "mapping": { + "type": "long" } } - ] - } + } + ] } } @@ -181,23 +177,21 @@ top-level `full_name` field, except for the `middle` field: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "full_name": { - "path_match": "name.*", - "path_unmatch": "*.middle", - "mapping": { - "type": "text", - "copy_to": "full_name" - } + "dynamic_templates": [ + { + "full_name": { + "path_match": "name.*", + "path_unmatch": "*.middle", + "mapping": { + "type": "text", + "copy_to": "full_name" } } - ] - } + } + ] } } @@ -222,32 +216,30 @@ field, and disables <> for all non-string fields: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "named_analyzers": { - "match_mapping_type": "string", - "match": "*", - "mapping": { - "type": "text", - "analyzer": "{name}" - } + "dynamic_templates": [ + { + "named_analyzers": { + "match_mapping_type": "string", + "match": "*", + "mapping": { + "type": "text", + "analyzer": "{name}" } - }, - { - "no_doc_values": { - "match_mapping_type":"*", - "mapping": { - "type": "{dynamic_type}", - "doc_values": false - } + } + }, + { + "no_doc_values": { + "match_mapping_type":"*", + "mapping": { + "type": "{dynamic_type}", + "doc_values": false } } - ] - } + } + ] } } @@ -276,21 +268,19 @@ you will have to search on the exact same value that was indexed. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings_as_keywords": { - "match_mapping_type": "string", - "mapping": { - "type": "keyword" - } + "dynamic_templates": [ + { + "strings_as_keywords": { + "match_mapping_type": "string", + "mapping": { + "type": "keyword" } } - ] - } + } + ] } } -------------------------------------------------- @@ -306,21 +296,19 @@ before 5.0): [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings_as_text": { - "match_mapping_type": "string", - "mapping": { - "type": "text" - } + "dynamic_templates": [ + { + "strings_as_text": { + "match_mapping_type": "string", + "mapping": { + "type": "text" } } - ] - } + } + ] } } -------------------------------------------------- @@ -334,28 +322,26 @@ disable the storage of these scoring factors in the index and save some space. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings_as_keywords": { - "match_mapping_type": "string", - "mapping": { - "type": "text", - "norms": false, - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } + "dynamic_templates": [ + { + "strings_as_keywords": { + "match_mapping_type": "string", + "mapping": { + "type": "text", + "norms": false, + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 } } } } - ] - } + } + ] } } -------------------------------------------------- @@ -375,31 +361,29 @@ maybe gain some indexing speed: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "unindexed_longs": { - "match_mapping_type": "long", - "mapping": { - "type": "long", - "index": false - } + "dynamic_templates": [ + { + "unindexed_longs": { + "match_mapping_type": "long", + "mapping": { + "type": "long", + "index": false } - }, - { - "unindexed_doubles": { - "match_mapping_type": "double", - "mapping": { - "type": "float", <1> - "index": false - } + } + }, + { + "unindexed_doubles": { + "match_mapping_type": "double", + "mapping": { + "type": "float", <1> + "index": false } } - ] - } + } + ] } } -------------------------------------------------- diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index acc401cf0ec78..c455c55f5ea7f 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -21,13 +21,11 @@ execute `exists` queries using those fields you might want to disable [source,js] -------------------------------------------------- -PUT tweets?include_type_name=true +PUT tweets { "mappings": { - "_doc": { - "_field_names": { - "enabled": false - } + "_field_names": { + "enabled": false } } } diff --git a/docs/reference/mapping/fields/meta-field.asciidoc b/docs/reference/mapping/fields/meta-field.asciidoc index bdfa7d94d2c0c..b2225dba4e810 100644 --- a/docs/reference/mapping/fields/meta-field.asciidoc +++ b/docs/reference/mapping/fields/meta-field.asciidoc @@ -7,16 +7,14 @@ metadata, such as the class that a document belongs to: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "_meta": { <1> - "class": "MyApp::User", - "version": { - "min": "1.0", - "max": "1.3" - } + "_meta": { <1> + "class": "MyApp::User", + "version": { + "min": "1.0", + "max": "1.3" } } } diff --git a/docs/reference/mapping/fields/routing-field.asciidoc b/docs/reference/mapping/fields/routing-field.asciidoc index ca823b2430b12..6c75b91b522a9 100644 --- a/docs/reference/mapping/fields/routing-field.asciidoc +++ b/docs/reference/mapping/fields/routing-field.asciidoc @@ -79,13 +79,11 @@ custom `routing` value required for all CRUD operations: [source,js] ------------------------------ -PUT my_index2?include_type_name=true +PUT my_index2 { "mappings": { - "_doc": { - "_routing": { - "required": true <1> - } + "_routing": { + "required": true <1> } } } diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index 8f7943ebc0ff6..757fc0fa5b662 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -13,13 +13,11 @@ within the index. For this reason, it can be disabled as follows: [source,js] -------------------------------------------------- -PUT tweets?include_type_name=true +PUT tweets { "mappings": { - "_doc": { - "_source": { - "enabled": false - } + "_source": { + "enabled": false } } } @@ -85,20 +83,18 @@ as follows: [source,js] -------------------------------------------------- -PUT logs?include_type_name=true +PUT logs { "mappings": { - "_doc": { - "_source": { - "includes": [ - "*.count", - "meta.*" - ], - "excludes": [ - "meta.description", - "meta.other.*" - ] - } + "_source": { + "includes": [ + "*.count", + "meta.*" + ], + "excludes": [ + "meta.description", + "meta.other.*" + ] } } } diff --git a/docs/reference/mapping/params/analyzer.asciidoc b/docs/reference/mapping/params/analyzer.asciidoc index 43cfb7082796f..b2652bf999a77 100644 --- a/docs/reference/mapping/params/analyzer.asciidoc +++ b/docs/reference/mapping/params/analyzer.asciidoc @@ -41,18 +41,16 @@ in the field mapping, as follows: [source,js] -------------------------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings": { - "_doc": { - "properties": { - "text": { <1> - "type": "text", - "fields": { - "english": { <2> - "type": "text", - "analyzer": "english" - } + "properties": { + "text": { <1> + "type": "text", + "fields": { + "english": { <2> + "type": "text", + "analyzer": "english" } } } @@ -93,7 +91,7 @@ To disable stop words for phrases a field utilising three analyzer settings will [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings":{ "analysis":{ @@ -123,14 +121,12 @@ PUT my_index?include_type_name=true } }, "mappings":{ - "_doc":{ - "properties":{ - "title": { - "type":"text", - "analyzer":"my_analyzer", <3> - "search_analyzer":"my_stop_analyzer", <4> - "search_quote_analyzer":"my_analyzer" <5> - } + "properties":{ + "title": { + "type":"text", + "analyzer":"my_analyzer", <3> + "search_analyzer":"my_stop_analyzer", <4> + "search_quote_analyzer":"my_analyzer" <5> } } } diff --git a/docs/reference/mapping/params/boost.asciidoc b/docs/reference/mapping/params/boost.asciidoc index c6f6d104c0ca0..7da03a66ac44e 100644 --- a/docs/reference/mapping/params/boost.asciidoc +++ b/docs/reference/mapping/params/boost.asciidoc @@ -6,18 +6,16 @@ Individual fields can be _boosted_ automatically -- count more towards the relev [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "boost": 2 <1> - }, - "content": { - "type": "text" - } + "properties": { + "title": { + "type": "text", + "boost": 2 <1> + }, + "content": { + "type": "text" } } } diff --git a/docs/reference/mapping/params/coerce.asciidoc b/docs/reference/mapping/params/coerce.asciidoc index dbb7ca84ba6fd..55f31262351fd 100644 --- a/docs/reference/mapping/params/coerce.asciidoc +++ b/docs/reference/mapping/params/coerce.asciidoc @@ -17,18 +17,16 @@ For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "number_one": { - "type": "integer" - }, - "number_two": { - "type": "integer", - "coerce": false - } + "properties": { + "number_one": { + "type": "integer" + }, + "number_two": { + "type": "integer", + "coerce": false } } } @@ -61,21 +59,19 @@ coercion globally across all mapping types: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "index.mapping.coerce": false }, "mappings": { - "_doc": { - "properties": { - "number_one": { - "type": "integer", - "coerce": true - }, - "number_two": { - "type": "integer" - } + "properties": { + "number_one": { + "type": "integer", + "coerce": true + }, + "number_two": { + "type": "integer" } } } diff --git a/docs/reference/mapping/params/copy-to.asciidoc b/docs/reference/mapping/params/copy-to.asciidoc index d56258aa73324..1796b31360aed 100644 --- a/docs/reference/mapping/params/copy-to.asciidoc +++ b/docs/reference/mapping/params/copy-to.asciidoc @@ -8,22 +8,20 @@ the `full_name` field as follows: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "first_name": { - "type": "text", - "copy_to": "full_name" <1> - }, - "last_name": { - "type": "text", - "copy_to": "full_name" <1> - }, - "full_name": { - "type": "text" - } + "properties": { + "first_name": { + "type": "text", + "copy_to": "full_name" <1> + }, + "last_name": { + "type": "text", + "copy_to": "full_name" <1> + }, + "full_name": { + "type": "text" } } } diff --git a/docs/reference/mapping/params/doc-values.asciidoc b/docs/reference/mapping/params/doc-values.asciidoc index 74efc1bbbfabd..5680cdabba884 100644 --- a/docs/reference/mapping/params/doc-values.asciidoc +++ b/docs/reference/mapping/params/doc-values.asciidoc @@ -23,18 +23,16 @@ value from a script, you can disable doc values in order to save disk space: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "status_code": { <1> - "type": "keyword" - }, - "session_id": { <2> - "type": "keyword", - "doc_values": false - } + "properties": { + "status_code": { <1> + "type": "keyword" + }, + "session_id": { <2> + "type": "keyword", + "doc_values": false } } } diff --git a/docs/reference/mapping/params/dynamic.asciidoc b/docs/reference/mapping/params/dynamic.asciidoc index f7159724f48f4..62d61f5f095a6 100644 --- a/docs/reference/mapping/params/dynamic.asciidoc +++ b/docs/reference/mapping/params/dynamic.asciidoc @@ -58,21 +58,19 @@ object or from the mapping type. For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic": false, <1> - "properties": { - "user": { <2> - "properties": { - "name": { - "type": "text" - }, - "social_networks": { <3> - "dynamic": true, - "properties": {} - } + "dynamic": false, <1> + "properties": { + "user": { <2> + "properties": { + "name": { + "type": "text" + }, + "social_networks": { <3> + "dynamic": true, + "properties": {} } } } diff --git a/docs/reference/mapping/params/enabled.asciidoc b/docs/reference/mapping/params/enabled.asciidoc index fbcd25c716bbf..06b76ddeae006 100644 --- a/docs/reference/mapping/params/enabled.asciidoc +++ b/docs/reference/mapping/params/enabled.asciidoc @@ -15,20 +15,18 @@ in any other way: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "user_id": { - "type": "keyword" - }, - "last_updated": { - "type": "date" - }, - "session_data": { <1> - "enabled": false - } + "properties": { + "user_id": { + "type": "keyword" + }, + "last_updated": { + "type": "date" + }, + "session_data": { <1> + "enabled": false } } } @@ -63,12 +61,10 @@ retrieved, but none of its contents are indexed in any way: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { <1> - "enabled": false - } + "enabled": false <1> } } @@ -88,7 +84,7 @@ GET my_index/_doc/session_1 <2> GET my_index/_mapping <3> -------------------------------------------------- // CONSOLE -<1> The entire `_doc` mapping type is disabled. +<1> The entire mapping type is disabled. <2> The document can be retrieved. <3> Checking the mapping reveals that no fields have been added. diff --git a/docs/reference/mapping/params/fielddata.asciidoc b/docs/reference/mapping/params/fielddata.asciidoc index eee7463c6400c..42f02b7ee28ea 100644 --- a/docs/reference/mapping/params/fielddata.asciidoc +++ b/docs/reference/mapping/params/fielddata.asciidoc @@ -55,17 +55,15 @@ enabled for aggregations, as follows: [source,js] --------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_field": { <1> - "type": "text", - "fields": { - "keyword": { <2> - "type": "keyword" - } + "properties": { + "my_field": { <1> + "type": "text", + "fields": { + "keyword": { <2> + "type": "keyword" } } } @@ -118,19 +116,17 @@ number of docs that the segment should contain with `min_segment_size`: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "tag": { - "type": "text", - "fielddata": true, - "fielddata_frequency_filter": { - "min": 0.001, - "max": 0.1, - "min_segment_size": 500 - } + "properties": { + "tag": { + "type": "text", + "fielddata": true, + "fielddata_frequency_filter": { + "min": 0.001, + "max": 0.1, + "min_segment_size": 500 } } } diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index 641433685d3a2..2be1bdf12d891 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -11,15 +11,13 @@ Besides the <>, your own [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date", - "format": "yyyy-MM-dd" - } + "properties": { + "date": { + "type": "date", + "format": "yyyy-MM-dd" } } } diff --git a/docs/reference/mapping/params/ignore-above.asciidoc b/docs/reference/mapping/params/ignore-above.asciidoc index 19c275dac5d01..daf5c92bcf34d 100644 --- a/docs/reference/mapping/params/ignore-above.asciidoc +++ b/docs/reference/mapping/params/ignore-above.asciidoc @@ -8,15 +8,13 @@ NOTE: All strings/array elements will still be present in the `_source` field, i [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "message": { - "type": "keyword", - "ignore_above": 20 <1> - } + "properties": { + "message": { + "type": "keyword", + "ignore_above": 20 <1> } } } diff --git a/docs/reference/mapping/params/ignore-malformed.asciidoc b/docs/reference/mapping/params/ignore-malformed.asciidoc index 84a53515d9ebe..30aa6c4e8bc0d 100644 --- a/docs/reference/mapping/params/ignore-malformed.asciidoc +++ b/docs/reference/mapping/params/ignore-malformed.asciidoc @@ -14,18 +14,16 @@ For example: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "number_one": { - "type": "integer", - "ignore_malformed": true - }, - "number_two": { - "type": "integer" - } + "properties": { + "number_one": { + "type": "integer", + "ignore_malformed": true + }, + "number_two": { + "type": "integer" } } } @@ -61,21 +59,19 @@ allow to ignore malformed content globally across all mapping types. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "index.mapping.ignore_malformed": true <1> }, "mappings": { - "_doc": { - "properties": { - "number_one": { <1> - "type": "byte" - }, - "number_two": { - "type": "integer", - "ignore_malformed": false <2> - } + "properties": { + "number_one": { <1> + "type": "byte" + }, + "number_two": { + "type": "integer", + "ignore_malformed": false <2> } } } diff --git a/docs/reference/mapping/params/index-options.asciidoc b/docs/reference/mapping/params/index-options.asciidoc index cda680399dde2..527050a87b29b 100644 --- a/docs/reference/mapping/params/index-options.asciidoc +++ b/docs/reference/mapping/params/index-options.asciidoc @@ -35,15 +35,13 @@ all other fields use `docs` as the default. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "index_options": "offsets" - } + "properties": { + "text": { + "type": "text", + "index_options": "offsets" } } } diff --git a/docs/reference/mapping/params/index-prefixes.asciidoc b/docs/reference/mapping/params/index-prefixes.asciidoc index baca8606f5c03..841ccdde08776 100644 --- a/docs/reference/mapping/params/index-prefixes.asciidoc +++ b/docs/reference/mapping/params/index-prefixes.asciidoc @@ -19,15 +19,13 @@ This example creates a text field using the default prefix length settings: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "body_text": { - "type": "text", - "index_prefixes": { } <1> - } + "properties": { + "body_text": { + "type": "text", + "index_prefixes": { } <1> } } } @@ -42,17 +40,15 @@ This example uses custom prefix length settings: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "full_name": { - "type": "text", - "index_prefixes": { - "min_chars" : 1, - "max_chars" : 10 - } + "properties": { + "full_name": { + "type": "text", + "index_prefixes": { + "min_chars" : 1, + "max_chars" : 10 } } } diff --git a/docs/reference/mapping/params/multi-fields.asciidoc b/docs/reference/mapping/params/multi-fields.asciidoc index e4bdb04506d92..ee1bc02c7fd8d 100644 --- a/docs/reference/mapping/params/multi-fields.asciidoc +++ b/docs/reference/mapping/params/multi-fields.asciidoc @@ -8,17 +8,15 @@ search, and as a `keyword` field for sorting or aggregations: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "city": { - "type": "text", - "fields": { - "raw": { <1> - "type": "keyword" - } + "properties": { + "city": { + "type": "text", + "fields": { + "raw": { <1> + "type": "keyword" } } } @@ -76,18 +74,16 @@ which stems words into their root form: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "text": { <1> - "type": "text", - "fields": { - "english": { <2> - "type": "text", - "analyzer": "english" - } + "properties": { + "text": { <1> + "type": "text", + "fields": { + "english": { <2> + "type": "text", + "analyzer": "english" } } } diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc index 79ba39e194726..da0298abda228 100644 --- a/docs/reference/mapping/params/normalizer.asciidoc +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -12,7 +12,7 @@ such as the <> query. [source,js] -------------------------------- -PUT index?include_type_name=true +PUT index { "settings": { "analysis": { @@ -26,12 +26,10 @@ PUT index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "keyword", - "normalizer": "my_normalizer" - } + "properties": { + "foo": { + "type": "keyword", + "normalizer": "my_normalizer" } } } diff --git a/docs/reference/mapping/params/null-value.asciidoc b/docs/reference/mapping/params/null-value.asciidoc index b2f46a2c7cef3..0a618ddcac9bc 100644 --- a/docs/reference/mapping/params/null-value.asciidoc +++ b/docs/reference/mapping/params/null-value.asciidoc @@ -10,15 +10,13 @@ the specified value so that it can be indexed and searched. For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "status_code": { - "type": "keyword", - "null_value": "NULL" <1> - } + "properties": { + "status_code": { + "type": "keyword", + "null_value": "NULL" <1> } } } diff --git a/docs/reference/mapping/params/position-increment-gap.asciidoc b/docs/reference/mapping/params/position-increment-gap.asciidoc index ae6cc85865709..853f98ade75b8 100644 --- a/docs/reference/mapping/params/position-increment-gap.asciidoc +++ b/docs/reference/mapping/params/position-increment-gap.asciidoc @@ -51,15 +51,13 @@ The `position_increment_gap` can be specified in the mapping. For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "names": { - "type": "text", - "position_increment_gap": 0 <1> - } + "properties": { + "names": { + "type": "text", + "position_increment_gap": 0 <1> } } } diff --git a/docs/reference/mapping/params/properties.asciidoc b/docs/reference/mapping/params/properties.asciidoc index 2efef0abf3cdc..9837dd3d5d9f4 100644 --- a/docs/reference/mapping/params/properties.asciidoc +++ b/docs/reference/mapping/params/properties.asciidoc @@ -15,23 +15,21 @@ field, and a `nested` field: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { <1> - "properties": { - "manager": { <2> - "properties": { - "age": { "type": "integer" }, - "name": { "type": "text" } - } - }, - "employees": { <3> - "type": "nested", - "properties": { - "age": { "type": "integer" }, - "name": { "type": "text" } - } + "properties": { <1> + "manager": { + "properties": { <2> + "age": { "type": "integer" }, + "name": { "type": "text" } + } + }, + "employees": { + "type": "nested", + "properties": { <3> + "age": { "type": "integer" }, + "name": { "type": "text" } } } } @@ -58,7 +56,7 @@ PUT my_index/_doc/1 <4> } -------------------------------------------------- // CONSOLE -<1> Properties under the `_doc` mapping type. +<1> Properties in the top-level mappings definition. <2> Properties under the `manager` object field. <3> Properties under the `employees` nested field. <4> An example document which corresponds to the above mapping. diff --git a/docs/reference/mapping/params/search-analyzer.asciidoc b/docs/reference/mapping/params/search-analyzer.asciidoc index eb483a3af384f..9b142f58c5960 100644 --- a/docs/reference/mapping/params/search-analyzer.asciidoc +++ b/docs/reference/mapping/params/search-analyzer.asciidoc @@ -14,7 +14,7 @@ this can be overridden with the `search_analyzer` setting: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -38,13 +38,11 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "analyzer": "autocomplete", <2> - "search_analyzer": "standard" <2> - } + "properties": { + "text": { + "type": "text", + "analyzer": "autocomplete", <2> + "search_analyzer": "standard" <2> } } } diff --git a/docs/reference/mapping/params/similarity.asciidoc b/docs/reference/mapping/params/similarity.asciidoc index 9e6e4e0877830..8085adf9110de 100644 --- a/docs/reference/mapping/params/similarity.asciidoc +++ b/docs/reference/mapping/params/similarity.asciidoc @@ -36,18 +36,16 @@ as follows: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "default_field": { <1> - "type": "text" - }, - "boolean_sim_field": { - "type": "text", - "similarity": "boolean" <2> - } + "properties": { + "default_field": { <1> + "type": "text" + }, + "boolean_sim_field": { + "type": "text", + "similarity": "boolean" <2> } } } diff --git a/docs/reference/mapping/params/store.asciidoc b/docs/reference/mapping/params/store.asciidoc index 186666ef5dd73..d3ebe13d4ad62 100644 --- a/docs/reference/mapping/params/store.asciidoc +++ b/docs/reference/mapping/params/store.asciidoc @@ -18,22 +18,20 @@ to extract those fields from a large `_source` field: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "store": true <1> - }, - "date": { - "type": "date", - "store": true <1> - }, - "content": { - "type": "text" - } + "properties": { + "title": { + "type": "text", + "store": true <1> + }, + "date": { + "type": "date", + "store": true <1> + }, + "content": { + "type": "text" } } } diff --git a/docs/reference/mapping/params/term-vector.asciidoc b/docs/reference/mapping/params/term-vector.asciidoc index 491a002f3d612..ff05539522efc 100644 --- a/docs/reference/mapping/params/term-vector.asciidoc +++ b/docs/reference/mapping/params/term-vector.asciidoc @@ -29,15 +29,13 @@ index. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "term_vector": "with_positions_offsets" - } + "properties": { + "text": { + "type": "text", + "term_vector": "with_positions_offsets" } } } diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index 251025c1ba24c..ee5ee4b4fe664 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -112,7 +112,7 @@ have looked something like this: [source,js] ---- -PUT twitter?include_type_name=true +PUT twitter { "mappings": { "user": { @@ -157,16 +157,16 @@ GET twitter/tweet/_search ---- // NOTCONSOLE -You could achieve the same thing by adding a custom `type` field as follows: +You can achieve the same thing by adding a custom `type` field as follows: [source,js] ---- -PUT twitter?include_type_name=true +PUT twitter?include_type_name=true <1> { "mappings": { "_doc": { "properties": { - "type": { "type": "keyword" }, <1> + "type": { "type": "keyword" }, <2> "name": { "type": "text" }, "user_name": { "type": "keyword" }, "email": { "type": "keyword" }, @@ -204,7 +204,7 @@ GET twitter/_search }, "filter": { "match": { - "type": "tweet" <1> + "type": "tweet" <2> } } } @@ -212,7 +212,9 @@ GET twitter/_search } ---- // NOTCONSOLE -<1> The explicit `type` field takes the place of the implicit `_type` field. +<1> Use `include_type_name=true` in case need to use the "old" syntax including the "_doc" object like +in this example +<2> The explicit `type` field takes the place of the implicit `_type` field. [float] ==== Parent/Child without mapping types @@ -299,7 +301,7 @@ This first example splits our `twitter` index into a `tweets` index and a [source,js] ---- -PUT users?include_type_name=true +PUT users { "settings": { "index.mapping.single_type": true @@ -321,7 +323,7 @@ PUT users?include_type_name=true } } -PUT tweets?include_type_name=true +PUT tweets { "settings": { "index.mapping.single_type": true @@ -376,7 +378,7 @@ documents of different types which have conflicting IDs: [source,js] ---- -PUT new_twitter?include_type_name=true +PUT new_twitter { "mappings": { "_doc": { @@ -427,10 +429,12 @@ POST _reindex [float] === Use `include_type_name=false` to prepare for upgrade to 8.0 -Index creation, mappings and document APIs support the `include_type_name` -option. When set to `false`, this option enables the behavior that will become -default in 8.0 when types are removed. See some examples of interactions with -Elasticsearch with this option turned off: +Index creation and mapping APIs support a new `include_type_name` url parameter +starting with version 6.7. It will default to `true` in version 6.7, default to +`false` in version 7.0 and will be removed in version 8.0. When set to `true`, +this parameter enables the pre-7.0 behavior of using type names in the API. + +See some examples of interactions with Elasticsearch with this option turned off: [float] ==== Index creation diff --git a/docs/reference/mapping/types/alias.asciidoc b/docs/reference/mapping/types/alias.asciidoc index 75a2b65e5c110..318124d71333e 100644 --- a/docs/reference/mapping/types/alias.asciidoc +++ b/docs/reference/mapping/types/alias.asciidoc @@ -7,21 +7,19 @@ and selected other APIs like <>. [source,js] -------------------------------- -PUT trips?include_type_name=true +PUT trips { "mappings": { - "_doc": { - "properties": { - "distance": { - "type": "long" - }, - "route_length_miles": { - "type": "alias", - "path": "distance" // <1> - }, - "transit_mode": { - "type": "keyword" - } + "properties": { + "distance": { + "type": "long" + }, + "route_length_miles": { + "type": "alias", + "path": "distance" // <1> + }, + "transit_mode": { + "type": "keyword" } } } diff --git a/docs/reference/mapping/types/binary.asciidoc b/docs/reference/mapping/types/binary.asciidoc index ebd50802b8d84..22e107dab565d 100644 --- a/docs/reference/mapping/types/binary.asciidoc +++ b/docs/reference/mapping/types/binary.asciidoc @@ -7,17 +7,15 @@ stored by default and is not searchable: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "name": { - "type": "text" - }, - "blob": { - "type": "binary" - } + "properties": { + "name": { + "type": "text" + }, + "blob": { + "type": "binary" } } } diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index 3a92ae23e2fd9..962022060b65b 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -17,14 +17,12 @@ For example: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "is_published": { - "type": "boolean" - } + "properties": { + "is_published": { + "type": "boolean" } } } diff --git a/docs/reference/mapping/types/date.asciidoc b/docs/reference/mapping/types/date.asciidoc index 03024d1383945..94aadb46fb2b6 100644 --- a/docs/reference/mapping/types/date.asciidoc +++ b/docs/reference/mapping/types/date.asciidoc @@ -30,14 +30,12 @@ For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date" <1> - } + "properties": { + "date": { + "type": "date" <1> } } } @@ -74,15 +72,13 @@ into a string. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date", - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" - } + "properties": { + "date": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" } } } diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index b12e9e120a479..b97566361a05c 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -17,17 +17,15 @@ You index a dense vector as an array of floats. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_vector": { - "type": "dense_vector" - }, - "my_text" : { - "type" : "keyword" - } + "properties": { + "my_vector": { + "type": "dense_vector" + }, + "my_text" : { + "type" : "keyword" } } } diff --git a/docs/reference/mapping/types/feature-vector.asciidoc b/docs/reference/mapping/types/feature-vector.asciidoc index 25358ab9ec3a6..b4701fc9ab7dd 100644 --- a/docs/reference/mapping/types/feature-vector.asciidoc +++ b/docs/reference/mapping/types/feature-vector.asciidoc @@ -11,14 +11,12 @@ one field to the mappings for each of them. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "topics": { - "type": "feature_vector" <1> - } + "properties": { + "topics": { + "type": "feature_vector" <1> } } } diff --git a/docs/reference/mapping/types/feature.asciidoc b/docs/reference/mapping/types/feature.asciidoc index 76eada86c59e5..7fe8ff6f935af 100644 --- a/docs/reference/mapping/types/feature.asciidoc +++ b/docs/reference/mapping/types/feature.asciidoc @@ -6,18 +6,16 @@ documents in queries with a <> query. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "pagerank": { - "type": "feature" <1> - }, - "url_length": { - "type": "feature", - "positive_score_impact": false <2> - } + "properties": { + "pagerank": { + "type": "feature" <1> + }, + "url_length": { + "type": "feature", + "positive_score_impact": false <2> } } } diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 752e611db811f..51e137fbc33b6 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -15,14 +15,12 @@ There are four ways that a geo-point may be specified, as demonstrated below: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "location": { + "type": "geo_point" } } } diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index 6206df61ac3b6..7f3f5f57d7077 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -6,14 +6,12 @@ https://en.wikipedia.org/wiki/IPv6[IPv6] addresses. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "ip_addr": { - "type": "ip" - } + "properties": { + "ip_addr": { + "type": "ip" } } } diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index e9e913be3a6d0..8ac0983dc9550 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -15,14 +15,12 @@ Below is an example of a mapping for a keyword field: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "tags": { - "type": "keyword" - } + "properties": { + "tags": { + "type": "keyword" } } } diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 4681897575189..f420e680c8590 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -75,14 +75,12 @@ queried independently of the others, with the < - } + "properties": { + "user": { + "type": "nested" <1> } } } diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index a279f9ca24e6a..f2977957ff463 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -17,21 +17,19 @@ Below is an example of configuring a mapping with numeric fields: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "number_of_bytes": { - "type": "integer" - }, - "time_in_seconds": { - "type": "float" - }, - "price": { - "type": "scaled_float", - "scaling_factor": 100 - } + "properties": { + "number_of_bytes": { + "type": "integer" + }, + "time_in_seconds": { + "type": "float" + }, + "price": { + "type": "scaled_float", + "scaling_factor": 100 } } } diff --git a/docs/reference/mapping/types/object.asciidoc b/docs/reference/mapping/types/object.asciidoc index 1bf9afa3c3277..f5b9a9df85617 100644 --- a/docs/reference/mapping/types/object.asciidoc +++ b/docs/reference/mapping/types/object.asciidoc @@ -41,22 +41,20 @@ An explicit mapping for the above document could look like this: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { <1> - "properties": { - "region": { - "type": "keyword" - }, - "manager": { <2> - "properties": { - "age": { "type": "integer" }, - "name": { <3> - "properties": { - "first": { "type": "text" }, - "last": { "type": "text" } - } + "properties": { <1> + "region": { + "type": "keyword" + }, + "manager": { <2> + "properties": { + "age": { "type": "integer" }, + "name": { <3> + "properties": { + "first": { "type": "text" }, + "last": { "type": "text" } } } } @@ -66,7 +64,7 @@ PUT my_index?include_type_name=true } -------------------------------------------------- // CONSOLE -<1> The mapping type is a type of object, and has a `properties` field. +<1> Properties in the top-level mappings definition. <2> The `manager` field is an inner `object` field. <3> The `manager.name` field is an inner `object` field within the `manager` field. diff --git a/docs/reference/mapping/types/parent-join.asciidoc b/docs/reference/mapping/types/parent-join.asciidoc index dacef7c4bc7cb..39bcaa96d7764 100644 --- a/docs/reference/mapping/types/parent-join.asciidoc +++ b/docs/reference/mapping/types/parent-join.asciidoc @@ -9,16 +9,14 @@ A parent/child relation can be defined as follows: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { <1> - "type": "join", - "relations": { - "question": "answer" <2> - } + "properties": { + "my_join_field": { <1> + "type": "join", + "relations": { + "question": "answer" <2> } } } @@ -319,18 +317,16 @@ make sense to disable eager loading: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "question": "answer" - }, - "eager_global_ordinals": false - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "question": "answer" + }, + "eager_global_ordinals": false } } } @@ -358,16 +354,14 @@ It is also possible to define multiple children for a single parent: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "question": ["answer", "comment"] <1> - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "question": ["answer", "comment"] <1> } } } @@ -388,17 +382,15 @@ Multiple levels of parent/child: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "question": ["answer", "comment"], <1> - "answer": "vote" <2> - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "question": ["answer", "comment"], <1> + "answer": "vote" <2> } } } diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index 7324826eb44ef..0096746d2df35 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -15,17 +15,15 @@ If the following mapping configures the `percolator` field type for the [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "query": { - "type": "percolator" - }, - "field": { - "type": "text" - } + "properties": { + "query": { + "type": "percolator" + }, + "field": { + "type": "text" } } } @@ -69,17 +67,15 @@ Lets take a look at the following index with a percolator field type: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc" : { - "properties": { - "query" : { - "type" : "percolator" - }, - "body" : { - "type": "text" - } + "properties": { + "query" : { + "type" : "percolator" + }, + "body" : { + "type": "text" } } } @@ -117,17 +113,15 @@ to read your queries you need to reindex your queries into a new index on the cu [source,js] -------------------------------------------------- -PUT new_index?include_type_name=true +PUT new_index { "mappings": { - "_doc" : { - "properties": { - "query" : { - "type" : "percolator" - }, - "body" : { - "type": "text" - } + "properties": { + "query" : { + "type" : "percolator" + }, + "body" : { + "type": "text" } } } @@ -269,7 +263,7 @@ with these settings and mapping: [source,js] -------------------------------------------------- -PUT /test_index?include_type_name=true +PUT /test_index { "settings": { "analysis": { @@ -282,15 +276,13 @@ PUT /test_index?include_type_name=true } }, "mappings": { - "_doc" : { - "properties": { - "query" : { - "type": "percolator" - }, - "body" : { - "type": "text", - "analyzer": "my_analyzer" <1> - } + "properties": { + "query" : { + "type": "percolator" + }, + "body" : { + "type": "text", + "analyzer": "my_analyzer" <1> } } } @@ -442,7 +434,7 @@ Creating an index with custom analysis settings: [source,js] -------------------------------------------------- -PUT my_queries1?include_type_name=true +PUT my_queries1 { "settings": { "analysis": { @@ -466,19 +458,17 @@ PUT my_queries1?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "query": { - "type": "percolator" - }, - "my_field": { - "type": "text", - "fields": { - "prefix": { <3> - "type": "text", - "analyzer": "wildcard_prefix", - "search_analyzer": "standard" - } + "properties": { + "query": { + "type": "percolator" + }, + "my_field": { + "type": "text", + "fields": { + "prefix": { <3> + "type": "text", + "analyzer": "wildcard_prefix", + "search_analyzer": "standard" } } } @@ -595,7 +585,7 @@ before the `edge_ngram` token filter. [source,js] -------------------------------------------------- -PUT my_queries2?include_type_name=true +PUT my_queries2 { "settings": { "analysis": { @@ -628,19 +618,17 @@ PUT my_queries2?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "query": { - "type": "percolator" - }, - "my_field": { - "type": "text", - "fields": { - "suffix": { - "type": "text", - "analyzer": "wildcard_suffix", - "search_analyzer": "wildcard_suffix_search_time" <1> - } + "properties": { + "query": { + "type": "percolator" + }, + "my_field": { + "type": "text", + "fields": { + "suffix": { + "type": "text", + "analyzer": "wildcard_suffix", + "search_analyzer": "wildcard_suffix_search_time" <1> } } } diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 630458b4866e5..79c9e6629c696 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -16,21 +16,19 @@ Below is an example of configuring a mapping with various range fields followed [source,js] -------------------------------------------------- -PUT range_index?include_type_name=true +PUT range_index { "settings": { "number_of_shards": 2 }, "mappings": { - "_doc": { - "properties": { - "expected_attendees": { - "type": "integer_range" - }, - "time_frame": { - "type": "date_range", <1> - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" - } + "properties": { + "expected_attendees": { + "type": "integer_range" + }, + "time_frame": { + "type": "date_range", <1> + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" } } } diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc index df374e857ba6a..38561789b5d3f 100644 --- a/docs/reference/mapping/types/sparse-vector.asciidoc +++ b/docs/reference/mapping/types/sparse-vector.asciidoc @@ -20,17 +20,15 @@ Dimensions don't need to be in order. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_vector": { - "type": "sparse_vector" - }, - "my_text" : { - "type" : "keyword" - } + "properties": { + "my_vector": { + "type": "sparse_vector" + }, + "my_text" : { + "type" : "keyword" } } } diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index 1b3c0a0eaf112..ee972918988ad 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -17,14 +17,12 @@ Below is an example of a mapping for a text field: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "full_name": { - "type": "text" - } + "properties": { + "full_name": { + "type": "text" } } } diff --git a/docs/reference/mapping/types/token-count.asciidoc b/docs/reference/mapping/types/token-count.asciidoc index 469583abf1bcd..d574c25e93d19 100644 --- a/docs/reference/mapping/types/token-count.asciidoc +++ b/docs/reference/mapping/types/token-count.asciidoc @@ -9,18 +9,16 @@ For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "name": { <1> - "type": "text", - "fields": { - "length": { <2> - "type": "token_count", - "analyzer": "standard" - } + "properties": { + "name": { <1> + "type": "text", + "fields": { + "length": { <2> + "type": "token_count", + "analyzer": "standard" } } } diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index d2c935ba6d32d..18c611e97cac1 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -24,6 +24,8 @@ You must create a job before you create a {dfeed}. You can associate only one `feed_id` (required):: (string) A numerical character string that uniquely identifies the {dfeed}. + This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + hyphens, and underscores. It must start and end with alphanumeric characters. ==== Request Body diff --git a/docs/reference/ml/transforms.asciidoc b/docs/reference/ml/transforms.asciidoc index 02dd4d5469cfd..a3e7df9fdf27a 100644 --- a/docs/reference/ml/transforms.asciidoc +++ b/docs/reference/ml/transforms.asciidoc @@ -26,51 +26,49 @@ subsequent examples: [source,js] ---------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings":{ - "_doc":{ - "properties": { - "@timestamp": { - "type": "date" - }, - "aborted_count": { - "type": "long" - }, - "another_field": { - "type": "keyword" <1> - }, - "clientip": { - "type": "keyword" - }, - "coords": { - "properties": { - "lat": { - "type": "keyword" - }, - "lon": { - "type": "keyword" - } + "properties": { + "@timestamp": { + "type": "date" + }, + "aborted_count": { + "type": "long" + }, + "another_field": { + "type": "keyword" <1> + }, + "clientip": { + "type": "keyword" + }, + "coords": { + "properties": { + "lat": { + "type": "keyword" + }, + "lon": { + "type": "keyword" } - }, - "error_count": { - "type": "long" - }, - "query": { - "type": "keyword" - }, - "some_field": { - "type": "keyword" - }, - "tokenstring1":{ - "type":"keyword" - }, - "tokenstring2":{ - "type":"keyword" - }, - "tokenstring3":{ - "type":"keyword" } + }, + "error_count": { + "type": "long" + }, + "query": { + "type": "keyword" + }, + "some_field": { + "type": "keyword" + }, + "tokenstring1":{ + "type":"keyword" + }, + "tokenstring2":{ + "type":"keyword" + }, + "tokenstring3":{ + "type":"keyword" } } } diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index 16828c3751a39..b2e27c76e494d 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -52,15 +52,13 @@ instance, if the `user` field were mapped as follows: [source,js] -------------------------------------------------- -PUT /example?include_type_name=true +PUT /example { "mappings": { - "_doc": { - "properties": { - "user": { - "type": "keyword", - "null_value": "_null_" - } + "properties": { + "user": { + "type": "keyword", + "null_value": "_null_" } } } diff --git a/docs/reference/query-dsl/feature-query.asciidoc b/docs/reference/query-dsl/feature-query.asciidoc index c261133cff412..353c135bf8efd 100644 --- a/docs/reference/query-dsl/feature-query.asciidoc +++ b/docs/reference/query-dsl/feature-query.asciidoc @@ -27,21 +27,19 @@ based or `pagerank`, `url_length` and the `sports` topic. [source,js] -------------------------------------------------- -PUT test?include_type_name=true +PUT test { "mappings": { - "_doc": { - "properties": { - "pagerank": { - "type": "feature" - }, - "url_length": { - "type": "feature", - "positive_score_impact": false - }, - "topics": { - "type": "feature_vector" - } + "properties": { + "pagerank": { + "type": "feature" + }, + "url_length": { + "type": "feature", + "positive_score_impact": false + }, + "topics": { + "type": "feature_vector" } } } diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 8c2b25ea0ee7a..487e944c09e10 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -6,16 +6,14 @@ bounding box. Assuming the following indexed document: [source,js] -------------------------------------------------- -PUT /my_locations?include_type_name=true +PUT /my_locations { "mappings": { - "_doc": { - "properties": { - "pin": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "pin": { + "properties": { + "location": { + "type": "geo_point" } } } diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index da1f99f834d67..da7b0ecfd81e5 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -7,16 +7,14 @@ document: [source,js] -------------------------------------------------- -PUT /my_locations?include_type_name=true +PUT /my_locations { "mappings": { - "_doc": { - "properties": { - "pin": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "pin": { + "properties": { + "location": { + "type": "geo_point" } } } diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index 4121ebfc45edd..424968090d6ab 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -24,14 +24,12 @@ Given the following index: [source,js] -------------------------------------------------- -PUT /example?include_type_name=true +PUT /example { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_shape" - } + "properties": { + "location": { + "type": "geo_shape" } } } @@ -99,14 +97,12 @@ shape: [source,js] -------------------------------------------------- -PUT /shapes?include_type_name=true +PUT /shapes { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_shape" - } + "properties": { + "location": { + "type": "geo_shape" } } } diff --git a/docs/reference/query-dsl/nested-query.asciidoc b/docs/reference/query-dsl/nested-query.asciidoc index f75348ca976ec..c58d68b73cff1 100644 --- a/docs/reference/query-dsl/nested-query.asciidoc +++ b/docs/reference/query-dsl/nested-query.asciidoc @@ -10,14 +10,12 @@ will work with: [source,js] -------------------------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings": { - "_doc" : { - "properties" : { - "obj1" : { - "type" : "nested" - } + "properties" : { + "obj1" : { + "type" : "nested" } } } diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc index ff306238ddea0..aa2074e7d1b7e 100644 --- a/docs/reference/query-dsl/parent-id-query.asciidoc +++ b/docs/reference/query-dsl/parent-id-query.asciidoc @@ -6,16 +6,14 @@ Given the following mapping definition: [source,js] -------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "my_parent": "my_child" - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "my_parent": "my_child" } } } diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index 9ae6dd802137c..89264af0f2619 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -13,18 +13,16 @@ Create an index with two fields: [source,js] -------------------------------------------------- -PUT /my-index?include_type_name=true +PUT /my-index { "mappings": { - "_doc": { - "properties": { - "message": { - "type": "text" - }, - "query": { - "type": "percolator" - } - } + "properties": { + "message": { + "type": "text" + }, + "query": { + "type": "percolator" + } } } } diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index dfd06a04523c3..e77791270318f 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -52,19 +52,19 @@ that can help you with scoring. We suggest you to use them instead of rewriting equivalent functions of your own, as these functions try to be the most efficient by using the internal mechanisms. -===== rational -latexmath:[rational(value,k) = value/(k + value)] +===== saturation +`saturation(value,k) = value/(k + value)` [source,js] -------------------------------------------------- "script" : { - "source" : "rational(doc['likes'].value, 1)" + "source" : "saturation(doc['likes'].value, 1)" } -------------------------------------------------- // NOTCONSOLE ===== sigmoid -latexmath:[sigmoid(value, k, a) = value^a/ (k^a + value^a)] +`sigmoid(value, k, a) = value^a/ (k^a + value^a)` [source,js] -------------------------------------------------- @@ -78,21 +78,22 @@ latexmath:[sigmoid(value, k, a) = value^a/ (k^a + value^a)] [[random-functions]] ===== Random functions There are two predefined ways to produce random values: +`randomNotReproducible` and `randomReproducible`. -1. `randomNotReproducible()` uses `java.util.Random` class +`randomNotReproducible()` uses `java.util.Random` class to generate a random value of the type `long`. The generated values are not reproducible between requests' invocations. - [source,js] - -------------------------------------------------- - "script" : { - "source" : "randomNotReproducible()" - } - -------------------------------------------------- - // NOTCONSOLE +[source,js] +-------------------------------------------------- +"script" : { + "source" : "randomNotReproducible()" +} +-------------------------------------------------- +// NOTCONSOLE -2. `randomReproducible(String seedValue, int seed)` produces +`randomReproducible(String seedValue, int seed)` produces reproducible random values of type `long`. This function requires more computational time and memory than the non-reproducible version. @@ -102,13 +103,13 @@ in the memory. For example, values of the document's `_seq_no` field is a good candidate, as documents on the same shard have unique values for the `_seq_no` field. - [source,js] - -------------------------------------------------- - "script" : { - "source" : "randomReproducible(Long.toString(doc['_seq_no'].value), 100)" - } - -------------------------------------------------- - // NOTCONSOLE +[source,js] +-------------------------------------------------- +"script" : { + "source" : "randomReproducible(Long.toString(doc['_seq_no'].value), 100)" +} +-------------------------------------------------- +// NOTCONSOLE A drawback of using `_seq_no` is that generated values change if @@ -121,13 +122,13 @@ you can use a field with unique values across shards, such as `_id`, but watch out for the memory usage as all these unique values need to be loaded into memory. - [source,js] - -------------------------------------------------- - "script" : { - "source" : "randomReproducible(doc['_id'].value, 100)" - } - -------------------------------------------------- - // NOTCONSOLE +[source,js] +-------------------------------------------------- +"script" : { + "source" : "randomReproducible(doc['_id'].value, 100)" +} +-------------------------------------------------- +// NOTCONSOLE [[decay-functions]] @@ -152,7 +153,7 @@ You can read more about decay functions } -------------------------------------------------- // NOTCONSOLE -<1> Use `params` to compile a script only once for different values of parameters +<1> Using `params` allows to compile the script only once, even if params change. ===== Decay functions for geo fields diff --git a/docs/reference/query-dsl/term-query.asciidoc b/docs/reference/query-dsl/term-query.asciidoc index 26dd6ee492f21..910123bbe6177 100644 --- a/docs/reference/query-dsl/term-query.asciidoc +++ b/docs/reference/query-dsl/term-query.asciidoc @@ -87,17 +87,15 @@ To demonstrate, try out the example below. First, create an index, specifying t [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "full_text": { - "type": "text" <1> - }, - "exact_value": { - "type": "keyword" <2> - } + "properties": { + "full_text": { + "type": "text" <1> + }, + "exact_value": { + "type": "keyword" <2> } } } diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 5c0e00ce360fa..3ebfb672e205f 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -12,14 +12,12 @@ be a number field: [source,js] -------------------------------------------------- -PUT /my-index?include_type_name=true +PUT /my-index { "mappings": { - "_doc": { - "properties": { - "required_matches": { - "type": "long" - } + "properties": { + "required_matches": { + "type": "long" } } } diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index 9970c4cc6223f..dac7622aab8ed 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -213,7 +213,7 @@ include::request/preference.asciidoc[] include::request/explain.asciidoc[] -include::request/version.asciidoc[] +include::request/version-and-seq-no.asciidoc[] include::request/index-boost.asciidoc[] diff --git a/docs/reference/search/request/highlighters-internal.asciidoc b/docs/reference/search/request/highlighters-internal.asciidoc index aa3377bdccf61..11534a01aa234 100644 --- a/docs/reference/search/request/highlighters-internal.asciidoc +++ b/docs/reference/search/request/highlighters-internal.asciidoc @@ -87,15 +87,13 @@ using `english` analyzer, and will be indexed without offsets or term vectors. [source,js] -------------------------------------------------- -PUT test_index?include_type_name=true +PUT test_index { "mappings": { - "_doc": { - "properties": { - "content" : { - "type" : "text", - "analyzer" : "english" - } + "properties": { + "content" : { + "type" : "text", + "analyzer" : "english" } } } diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index bcd2c297e5da3..b287b1609703e 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -76,6 +76,7 @@ Inner hits also supports the following per document features: * <> * <> * <> +* <> [[nested-inner-hits]] ==== Nested inner hits diff --git a/docs/reference/search/request/post-filter.asciidoc b/docs/reference/search/request/post-filter.asciidoc index b404a30a8250a..c46cdb1e52286 100644 --- a/docs/reference/search/request/post-filter.asciidoc +++ b/docs/reference/search/request/post-filter.asciidoc @@ -9,15 +9,13 @@ Imagine that you are selling shirts that have the following properties: [source,js] -------------------------------------------------- -PUT /shirts?include_type_name=true +PUT /shirts { "mappings": { - "_doc": { - "properties": { - "brand": { "type": "keyword"}, - "color": { "type": "keyword"}, - "model": { "type": "keyword"} - } + "properties": { + "brand": { "type": "keyword"}, + "color": { "type": "keyword"}, + "model": { "type": "keyword"} } } } diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index fe7cf362ea36f..bd8c0d1ad5c27 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -9,20 +9,18 @@ Assuming the following index mapping: [source,js] -------------------------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings": { - "_doc": { - "properties": { - "post_date": { "type": "date" }, - "user": { - "type": "keyword" - }, - "name": { - "type": "keyword" - }, - "age": { "type": "integer" } - } + "properties": { + "post_date": { "type": "date" }, + "user": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "age": { "type": "integer" } } } } diff --git a/docs/reference/search/request/version-and-seq-no.asciidoc b/docs/reference/search/request/version-and-seq-no.asciidoc new file mode 100644 index 0000000000000..2bca4c985b290 --- /dev/null +++ b/docs/reference/search/request/version-and-seq-no.asciidoc @@ -0,0 +1,34 @@ +[[search-request-seq-no-primary-term]] +=== Sequence Numbers and Primary Term + +Returns the sequence number and primary term of the last modification to each search hit. +See <> for more details. + +[source,js] +-------------------------------------------------- +GET /_search +{ + "seq_no_primary_term": true, + "query" : { + "term" : { "user" : "kimchy" } + } +} +-------------------------------------------------- +// CONSOLE + +[[search-request-version]] +=== Version + +Returns a version for each search hit. + +[source,js] +-------------------------------------------------- +GET /_search +{ + "version": true, + "query" : { + "term" : { "user" : "kimchy" } + } +} +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/search/request/version.asciidoc b/docs/reference/search/request/version.asciidoc deleted file mode 100644 index 57c6ce27feb91..0000000000000 --- a/docs/reference/search/request/version.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -[[search-request-version]] -=== Version - -Returns a version for each search hit. - -[source,js] --------------------------------------------------- -GET /_search -{ - "version": true, - "query" : { - "term" : { "user" : "kimchy" } - } -} --------------------------------------------------- -// CONSOLE diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 191f01f17394d..c0b527c06e550 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -24,17 +24,15 @@ which indexes the field values for fast completions. [source,js] -------------------------------------------------- -PUT music?include_type_name=true +PUT music { "mappings": { - "_doc" : { - "properties" : { - "suggest" : { - "type" : "completion" - }, - "title" : { - "type": "keyword" - } + "properties" : { + "suggest" : { + "type" : "completion" + }, + "title" : { + "type": "keyword" } } } diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index 1ef8968188ccd..63692f0b06f82 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -21,53 +21,49 @@ field: [source,js] -------------------------------------------------- -PUT place?include_type_name=true +PUT place { "mappings": { - "_doc" : { - "properties" : { - "suggest" : { - "type" : "completion", - "contexts": [ - { <1> - "name": "place_type", - "type": "category" - }, - { <2> - "name": "location", - "type": "geo", - "precision": 4 - } - ] - } + "properties" : { + "suggest" : { + "type" : "completion", + "contexts": [ + { <1> + "name": "place_type", + "type": "category" + }, + { <2> + "name": "location", + "type": "geo", + "precision": 4 + } + ] } } } } -PUT place_path_category?include_type_name=true +PUT place_path_category { "mappings": { - "_doc" : { - "properties" : { - "suggest" : { - "type" : "completion", - "contexts": [ - { <3> - "name": "place_type", - "type": "category", - "path": "cat" - }, - { <4> - "name": "location", - "type": "geo", - "precision": 4, - "path": "loc" - } - ] - }, - "loc": { - "type": "geo_point" - } + "properties" : { + "suggest" : { + "type" : "completion", + "contexts": [ + { <3> + "name": "place_type", + "type": "category", + "path": "cat" + }, + { <4> + "name": "location", + "type": "geo", + "precision": 4, + "path": "loc" + } + ] + }, + "loc": { + "type": "geo_point" } } } diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index 08a7aabd1c5de..9c2c56cc40fec 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -23,7 +23,7 @@ work. The `reverse` analyzer is used only in the last example. [source,js] -------------------------------------------------- -PUT test?include_type_name=true +PUT test { "settings": { "index": { @@ -52,19 +52,17 @@ PUT test?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "fields": { - "trigram": { - "type": "text", - "analyzer": "trigram" - }, - "reverse": { - "type": "text", - "analyzer": "reverse" - } + "properties": { + "title": { + "type": "text", + "fields": { + "trigram": { + "type": "text", + "analyzer": "trigram" + }, + "reverse": { + "type": "text", + "analyzer": "reverse" } } } diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index c17f1393e1520..47eab847336ce 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -6,7 +6,7 @@ without executing it. We'll use the following test data to explain _validate: [source,js] -------------------------------------------------- -PUT twitter/_doc/_bulk?refresh +PUT twitter/_bulk?refresh {"index":{"_id":1}} {"user" : "kimchy", "post_date" : "2009-11-15T14:12:12", "message" : "trying out Elasticsearch"} {"index":{"_id":2}} diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java b/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java index 87c0ff2817eb7..2d57faf5cb897 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java @@ -34,14 +34,17 @@ public WriteOperation createWriteOperation(SocketChannelContext context, Object return new FlushReadyWrite(context, (ByteBuffer[]) message, listener); } + @Override public List writeToBytes(WriteOperation writeOperation) { assert writeOperation instanceof FlushReadyWrite : "Write operation must be flush ready"; return Collections.singletonList((FlushReadyWrite) writeOperation); } + @Override public List pollFlushOperations() { return EMPTY_LIST; } + @Override public void close() {} } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java index fe1bc1cf4043e..abb46cf3aea03 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java @@ -19,161 +19,26 @@ package org.elasticsearch.nio; -import org.elasticsearch.nio.utils.ExceptionsHelper; - +import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.Collectors; -import java.util.stream.Stream; /** - * The NioGroup is a group of selectors for interfacing with java nio. When it is started it will create the - * configured number of selectors. Each selector will be running in a dedicated thread. Server connections - * can be bound using the {@link #bindServerChannel(InetSocketAddress, ChannelFactory)} method. Client - * connections can be opened using the {@link #openChannel(InetSocketAddress, ChannelFactory)} method. - *

- * The logic specific to a particular channel is provided by the {@link ChannelFactory} passed to the method - * when the channel is created. This is what allows an NioGroup to support different channel types. + * An class for interfacing with java.nio. Implementations implement the underlying logic for opening + * channels and registering them with the OS. */ -public class NioGroup implements AutoCloseable { - - - private final List dedicatedAcceptors; - private final RoundRobinSupplier acceptorSupplier; - - private final List selectors; - private final RoundRobinSupplier selectorSupplier; - - private final AtomicBoolean isOpen = new AtomicBoolean(true); +public interface NioGroup extends Closeable { /** - * This will create an NioGroup with no dedicated acceptors. All server channels will be handled by the - * same selectors that are handling child channels. - * - * @param threadFactory factory to create selector threads - * @param selectorCount the number of selectors to be created - * @param eventHandlerFunction function for creating event handlers - * @throws IOException occurs if there is a problem while opening a java.nio.Selector + * Opens and binds a server channel to accept incoming connections. */ - public NioGroup(ThreadFactory threadFactory, int selectorCount, Function, EventHandler> eventHandlerFunction) - throws IOException { - this(null, 0, threadFactory, selectorCount, eventHandlerFunction); - } + S bindServerChannel(InetSocketAddress address, ChannelFactory factory) throws IOException; /** - * This will create an NioGroup with dedicated acceptors. All server channels will be handled by a group - * of selectors dedicated to accepting channels. These accepted channels will be handed off the - * non-server selectors. - * - * @param acceptorThreadFactory factory to create acceptor selector threads - * @param dedicatedAcceptorCount the number of dedicated acceptor selectors to be created - * @param selectorThreadFactory factory to create non-acceptor selector threads - * @param selectorCount the number of non-acceptor selectors to be created - * @param eventHandlerFunction function for creating event handlers - * @throws IOException occurs if there is a problem while opening a java.nio.Selector + * Opens a outgoing client channel. */ - public NioGroup(ThreadFactory acceptorThreadFactory, int dedicatedAcceptorCount, ThreadFactory selectorThreadFactory, int selectorCount, - Function, EventHandler> eventHandlerFunction) throws IOException { - dedicatedAcceptors = new ArrayList<>(dedicatedAcceptorCount); - selectors = new ArrayList<>(selectorCount); - - try { - List> suppliersToSet = new ArrayList<>(selectorCount); - for (int i = 0; i < selectorCount; ++i) { - RoundRobinSupplier supplier = new RoundRobinSupplier<>(); - suppliersToSet.add(supplier); - NioSelector selector = new NioSelector(eventHandlerFunction.apply(supplier)); - selectors.add(selector); - } - for (RoundRobinSupplier supplierToSet : suppliersToSet) { - supplierToSet.setSelectors(selectors.toArray(new NioSelector[0])); - assert supplierToSet.count() == selectors.size() : "Supplier should have same count as selector list."; - } - - for (int i = 0; i < dedicatedAcceptorCount; ++i) { - RoundRobinSupplier supplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); - NioSelector acceptor = new NioSelector(eventHandlerFunction.apply(supplier)); - dedicatedAcceptors.add(acceptor); - } - - if (dedicatedAcceptorCount != 0) { - acceptorSupplier = new RoundRobinSupplier<>(dedicatedAcceptors.toArray(new NioSelector[0])); - } else { - acceptorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); - } - selectorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); - assert selectorCount == selectors.size() : "We need to have created all the selectors at this point."; - assert dedicatedAcceptorCount == dedicatedAcceptors.size() : "We need to have created all the acceptors at this point."; - - startSelectors(selectors, selectorThreadFactory); - startSelectors(dedicatedAcceptors, acceptorThreadFactory); - } catch (Exception e) { - try { - close(); - } catch (Exception e1) { - e.addSuppressed(e1); - } - throw e; - } - } - - public S bindServerChannel(InetSocketAddress address, ChannelFactory factory) - throws IOException { - ensureOpen(); - return factory.openNioServerSocketChannel(address, acceptorSupplier); - } - - public S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException { - ensureOpen(); - return factory.openNioChannel(address, selectorSupplier); - } + S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException; @Override - public void close() throws IOException { - if (isOpen.compareAndSet(true, false)) { - List toClose = Stream.concat(dedicatedAcceptors.stream(), selectors.stream()).collect(Collectors.toList()); - List closingExceptions = new ArrayList<>(); - for (NioSelector selector : toClose) { - try { - selector.close(); - } catch (IOException e) { - closingExceptions.add(e); - } - } - ExceptionsHelper.rethrowAndSuppress(closingExceptions); - } - } - - private static void startSelectors(Iterable selectors, ThreadFactory threadFactory) { - for (NioSelector selector : selectors) { - if (selector.isRunning() == false) { - threadFactory.newThread(selector::runLoop).start(); - try { - selector.isRunningFuture().get(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IllegalStateException("Interrupted while waiting for selector to start.", e); - } catch (ExecutionException e) { - if (e.getCause() instanceof RuntimeException) { - throw (RuntimeException) e.getCause(); - } else { - throw new RuntimeException("Exception during selector start.", e); - } - } - } - } - } - - private void ensureOpen() { - if (isOpen.get() == false) { - throw new IllegalStateException("NioGroup is closed."); - } - } + void close() throws IOException; } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/NioSelectorGroup.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelectorGroup.java new file mode 100644 index 0000000000000..bb4d4d14a0c07 --- /dev/null +++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelectorGroup.java @@ -0,0 +1,181 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import org.elasticsearch.nio.utils.ExceptionsHelper; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * The NioSelectorGroup is a group of selectors for interfacing with java nio. When it is started it will create the + * configured number of selectors. Each selector will be running in a dedicated thread. Server connections + * can be bound using the {@link #bindServerChannel(InetSocketAddress, ChannelFactory)} method. Client + * connections can be opened using the {@link #openChannel(InetSocketAddress, ChannelFactory)} method. + *

+ * The logic specific to a particular channel is provided by the {@link ChannelFactory} passed to the method + * when the channel is created. This is what allows an NioSelectorGroup to support different channel types. + */ +public class NioSelectorGroup implements NioGroup { + + + private final List dedicatedAcceptors; + private final RoundRobinSupplier acceptorSupplier; + + private final List selectors; + private final RoundRobinSupplier selectorSupplier; + + private final AtomicBoolean isOpen = new AtomicBoolean(true); + + /** + * This will create an NioSelectorGroup with no dedicated acceptors. All server channels will be handled by the + * same selectors that are handling child channels. + * + * @param threadFactory factory to create selector threads + * @param selectorCount the number of selectors to be created + * @param eventHandlerFunction function for creating event handlers + * @throws IOException occurs if there is a problem while opening a java.nio.Selector + */ + public NioSelectorGroup(ThreadFactory threadFactory, int selectorCount, + Function, EventHandler> eventHandlerFunction) throws IOException { + this(null, 0, threadFactory, selectorCount, eventHandlerFunction); + } + + /** + * This will create an NioSelectorGroup with dedicated acceptors. All server channels will be handled by a group + * of selectors dedicated to accepting channels. These accepted channels will be handed off the + * non-server selectors. + * + * @param acceptorThreadFactory factory to create acceptor selector threads + * @param dedicatedAcceptorCount the number of dedicated acceptor selectors to be created + * @param selectorThreadFactory factory to create non-acceptor selector threads + * @param selectorCount the number of non-acceptor selectors to be created + * @param eventHandlerFunction function for creating event handlers + * @throws IOException occurs if there is a problem while opening a java.nio.Selector + */ + public NioSelectorGroup(ThreadFactory acceptorThreadFactory, int dedicatedAcceptorCount, ThreadFactory selectorThreadFactory, + int selectorCount, Function, EventHandler> eventHandlerFunction) throws IOException { + dedicatedAcceptors = new ArrayList<>(dedicatedAcceptorCount); + selectors = new ArrayList<>(selectorCount); + + try { + List> suppliersToSet = new ArrayList<>(selectorCount); + for (int i = 0; i < selectorCount; ++i) { + RoundRobinSupplier supplier = new RoundRobinSupplier<>(); + suppliersToSet.add(supplier); + NioSelector selector = new NioSelector(eventHandlerFunction.apply(supplier)); + selectors.add(selector); + } + for (RoundRobinSupplier supplierToSet : suppliersToSet) { + supplierToSet.setSelectors(selectors.toArray(new NioSelector[0])); + assert supplierToSet.count() == selectors.size() : "Supplier should have same count as selector list."; + } + + for (int i = 0; i < dedicatedAcceptorCount; ++i) { + RoundRobinSupplier supplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); + NioSelector acceptor = new NioSelector(eventHandlerFunction.apply(supplier)); + dedicatedAcceptors.add(acceptor); + } + + if (dedicatedAcceptorCount != 0) { + acceptorSupplier = new RoundRobinSupplier<>(dedicatedAcceptors.toArray(new NioSelector[0])); + } else { + acceptorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); + } + selectorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); + assert selectorCount == selectors.size() : "We need to have created all the selectors at this point."; + assert dedicatedAcceptorCount == dedicatedAcceptors.size() : "We need to have created all the acceptors at this point."; + + startSelectors(selectors, selectorThreadFactory); + startSelectors(dedicatedAcceptors, acceptorThreadFactory); + } catch (Exception e) { + try { + close(); + } catch (Exception e1) { + e.addSuppressed(e1); + } + throw e; + } + } + + @Override + public S bindServerChannel(InetSocketAddress address, ChannelFactory factory) + throws IOException { + ensureOpen(); + return factory.openNioServerSocketChannel(address, acceptorSupplier); + } + + @Override + public S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException { + ensureOpen(); + return factory.openNioChannel(address, selectorSupplier); + } + + @Override + public void close() throws IOException { + if (isOpen.compareAndSet(true, false)) { + List toClose = Stream.concat(dedicatedAcceptors.stream(), selectors.stream()).collect(Collectors.toList()); + List closingExceptions = new ArrayList<>(); + for (NioSelector selector : toClose) { + try { + selector.close(); + } catch (IOException e) { + closingExceptions.add(e); + } + } + ExceptionsHelper.rethrowAndSuppress(closingExceptions); + } + } + + private static void startSelectors(Iterable selectors, ThreadFactory threadFactory) { + for (NioSelector selector : selectors) { + if (selector.isRunning() == false) { + threadFactory.newThread(selector::runLoop).start(); + try { + selector.isRunningFuture().get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException("Interrupted while waiting for selector to start.", e); + } catch (ExecutionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else { + throw new RuntimeException("Exception during selector start.", e); + } + } + } + } + } + + private void ensureOpen() { + if (isOpen.get() == false) { + throw new IllegalStateException("NioGroup is closed."); + } + } +} diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorGroupTests.java similarity index 88% rename from libs/nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorGroupTests.java index 027f1255a590c..34a8d501ea316 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorGroupTests.java @@ -30,16 +30,16 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.mockito.Mockito.mock; -public class NioGroupTests extends ESTestCase { +public class NioSelectorGroupTests extends ESTestCase { - private NioGroup nioGroup; + private NioSelectorGroup nioGroup; @Override @SuppressWarnings("unchecked") public void setUp() throws Exception { super.setUp(); - nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "acceptor"), 1, daemonThreadFactory(Settings.EMPTY, "selector"), 1, - (s) -> new EventHandler(mock(Consumer.class), s)); + nioGroup = new NioSelectorGroup(daemonThreadFactory(Settings.EMPTY, "acceptor"), 1, + daemonThreadFactory(Settings.EMPTY, "selector"), 1, (s) -> new EventHandler(mock(Consumer.class), s)); } @Override @@ -74,7 +74,7 @@ public void testCanCloseTwice() throws IOException { @SuppressWarnings("unchecked") public void testExceptionAtStartIsHandled() throws IOException { RuntimeException ex = new RuntimeException(); - CheckedRunnable ctor = () -> new NioGroup(r -> {throw ex;}, 1, + CheckedRunnable ctor = () -> new NioSelectorGroup(r -> {throw ex;}, 1, daemonThreadFactory(Settings.EMPTY, "selector"), 1, (s) -> new EventHandler(mock(Consumer.class), s)); RuntimeException runtimeException = expectThrows(RuntimeException.class, ctor::run); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterAlreadyBootstrappedException.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixAggregationInspectionHelper.java similarity index 54% rename from server/src/main/java/org/elasticsearch/cluster/coordination/ClusterAlreadyBootstrappedException.java rename to modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixAggregationInspectionHelper.java index cc1c77c88477c..c62c91477cdef 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterAlreadyBootstrappedException.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixAggregationInspectionHelper.java @@ -16,23 +16,14 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.cluster.coordination; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; +package org.elasticsearch.search.aggregations.matrix.stats; /** - * Exception thrown if trying to discovery nodes in order to perform cluster bootstrapping, but a cluster is formed before all the required - * nodes are discovered. + * Counterpart to {@link org.elasticsearch.search.aggregations.support.AggregationInspectionHelper}, providing + * helpers for some aggs in the MatrixStats package */ -public class ClusterAlreadyBootstrappedException extends ElasticsearchException { - public ClusterAlreadyBootstrappedException() { - super("node has already joined a bootstrapped cluster, bootstrapping is not required"); - } - - public ClusterAlreadyBootstrappedException(StreamInput in) throws IOException { - super(in); +public class MatrixAggregationInspectionHelper { + public static boolean hasValue(InternalMatrixStats agg) { + return agg.getResults() != null; } } diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java index aa778e6f704f9..0512f3d5db3b6 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java @@ -53,10 +53,12 @@ public void testNoData() throws Exception { .fields(Collections.singletonList("field")); InternalMatrixStats stats = search(searcher, new MatchAllDocsQuery(), aggBuilder, ft); assertNull(stats.getStats()); + assertFalse(MatrixAggregationInspectionHelper.hasValue(stats)); } } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37587") public void testTwoFields() throws Exception { String fieldA = "a"; MappedFieldType ftA = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); @@ -87,8 +89,9 @@ public void testTwoFields() throws Exception { IndexSearcher searcher = new IndexSearcher(reader); MatrixStatsAggregationBuilder aggBuilder = new MatrixStatsAggregationBuilder("my_agg") .fields(Arrays.asList(fieldA, fieldB)); - InternalMatrixStats stats = search(searcher, new MatchAllDocsQuery(), aggBuilder, ftA, ftB); + InternalMatrixStats stats = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, ftA, ftB); multiPassStats.assertNearlyEqual(new MatrixStatsResults(stats.getStats())); + assertTrue(MatrixAggregationInspectionHelper.hasValue(stats)); } } } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java index 9eb32767eb7c8..5484f7f1f9c6b 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.geoip; +import org.apache.lucene.util.Constants; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; @@ -92,6 +93,7 @@ protected Settings nodeSettings(final int nodeOrdinal) { * @throws IOException if an I/O exception occurs building the JSON */ public void testLazyLoading() throws IOException { + assumeFalse("https://github.com/elastic/elasticsearch/issues/37342", Constants.WINDOWS); final BytesReference bytes; try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.startObject(); diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..fa128372b2467 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +c84431b751d851f484f2799f6dcb9110113b1958 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 6f055c7ac5eda..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -825a0eb2b9ff58df36753971217cba80466d7132 \ No newline at end of file diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt index 3aa32eff9c7a2..3d7b29826c747 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt @@ -20,7 +20,7 @@ # This file contains a whitelist for functions to be used in Score context static_import { - double rational(double, double) from_class org.elasticsearch.script.ScoreScriptUtils + double saturation(double, double) from_class org.elasticsearch.script.ScoreScriptUtils double sigmoid(double, double, double) from_class org.elasticsearch.script.ScoreScriptUtils double randomReproducible(String, int) from_class org.elasticsearch.script.ScoreScriptUtils double randomNotReproducible() bound_to org.elasticsearch.script.ScoreScriptUtils$RandomNotReproducible diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml index df9bf0048068c..fb6f40694b271 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml @@ -5,6 +5,77 @@ setup: version: " - 6.99.99" reason: "script score query was introduced in 7.0.0" +--- +"Math functions": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 2 + mappings: + _doc: + properties: + dval: + type: double + - do: + index: + index: test + type: _doc + id: d1 + body: {"dval": 10} + - do: + index: + index: test + type: _doc + id: d2 + body: {"dval": 100} + - do: + index: + index: test + type: _doc + id: d3 + body: {"dval": 1000} + + - do: + indices.refresh: {} + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "saturation(doc['dval'].value, params.k)" + params: + k : 100 + - match: { hits.total: 3 } + - match: { hits.hits.0._id: d3 } + - match: { hits.hits.1._id: d2 } + - match: { hits.hits.2._id: d1 } + + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "sigmoid(doc['dval'].value, params.k, params.a)" + params: + k: 100 + a: 2 + - match: { hits.total: 3 } + - match: { hits.hits.0._id: d3 } + - match: { hits.hits.1._id: d2 } + - match: { hits.hits.2._id: d1 } + --- "Random functions": - do: diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java index fdcb1f54ea7dd..7beddc13ca598 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java @@ -31,9 +31,9 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.List; import java.util.Map; @@ -107,7 +107,7 @@ public String typeName() { } @Override - public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { throw new UnsupportedOperationException( "Field [" + name() + "] of type [" + typeName() + "] doesn't support docvalue_fields or aggregations"); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index d3719ec884fa1..38d635ab3939f 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -59,10 +59,10 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.MultiValueMode; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.math.BigDecimal; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -301,7 +301,7 @@ public Object valueForDisplay(Object value) { } @Override - public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { if (timeZone != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones"); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java index 2eb360255d070..f7288d5039390 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java @@ -31,9 +31,9 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.List; import java.util.Map; @@ -107,7 +107,7 @@ public String typeName() { } @Override - public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { throw new UnsupportedOperationException( "Field [" + name() + "] of type [" + typeName() + "] doesn't support docvalue_fields or aggregations"); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponseTests.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationInspectionHelper.java similarity index 56% rename from server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponseTests.java rename to modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationInspectionHelper.java index fb33dbc5fcbd6..08d137f4fb90d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponseTests.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationInspectionHelper.java @@ -16,18 +16,19 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.action.admin.cluster.bootstrap; +package org.elasticsearch.join.aggregations; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; +/** + * Counterpart to {@link org.elasticsearch.search.aggregations.support.AggregationInspectionHelper}, providing + * helpers for some aggs in the Join package + */ +public class JoinAggregationInspectionHelper { -import static org.hamcrest.Matchers.equalTo; + public static boolean hasValue(InternalParent agg) { + return agg.getDocCount() > 0; + } -public class BootstrapClusterResponseTests extends ESTestCase { - public void testSerialization() throws IOException { - final BootstrapClusterResponse original = new BootstrapClusterResponse(randomBoolean()); - final BootstrapClusterResponse deserialized = copyWriteable(original, writableRegistry(), BootstrapClusterResponse::new); - assertThat(deserialized.getAlreadyBootstrapped(), equalTo(original.getAlreadyBootstrapped())); + public static boolean hasValue(InternalChildren agg) { + return agg.getDocCount() > 0; } } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java index 685c872fa72d4..c8c7200df6114 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java @@ -90,6 +90,7 @@ public void testNoDocs() throws IOException { assertEquals(0, childrenToParent.getDocCount()); assertNotNull("Aggregations: " + childrenToParent.getAggregations().asMap(), parentAggregation); assertEquals(Double.POSITIVE_INFINITY, ((InternalMin) parentAggregation).getValue(), Double.MIN_VALUE); + assertFalse(JoinAggregationInspectionHelper.hasValue(childrenToParent)); }); indexReader.close(); directory.close(); @@ -119,6 +120,7 @@ public void testParentChild() throws IOException { parent.getAggregations().asMap(), expectedTotalParents, parent.getDocCount()); assertEquals(expectedMinValue, ((InternalMin) parent.getAggregations().get("in_parent")).getValue(), Double.MIN_VALUE); + assertTrue(JoinAggregationInspectionHelper.hasValue(parent)); }); // verify for each children @@ -170,6 +172,7 @@ public void testParentChildTerms() throws IOException { // verify a terms-aggregation inside the parent-aggregation testCaseTerms(new MatchAllDocsQuery(), indexSearcher, parent -> { assertNotNull(parent); + assertTrue(JoinAggregationInspectionHelper.hasValue(parent)); LongTerms valueTerms = parent.getAggregations().get("value_terms"); assertNotNull(valueTerms); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java index 452fe1b490b02..9023f3f0485ba 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java @@ -105,6 +105,7 @@ public void testParentChild() throws IOException { expectedMinValue = Math.min(expectedMinValue, expectedValues.v2()); } assertEquals(expectedTotalChildren, child.getDocCount()); + assertTrue(JoinAggregationInspectionHelper.hasValue(child)); assertEquals(expectedMinValue, ((InternalMin) child.getAggregations().get("in_child")).getValue(), Double.MIN_VALUE); }); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 6e4e79d16e5a5..eea01d61386de 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -252,6 +252,7 @@ public void testFromJson() throws IOException { " \"from\" : 0,\n" + " \"size\" : 100,\n" + " \"version\" : false,\n" + + " \"seq_no_primary_term\" : false,\n" + " \"explain\" : false,\n" + " \"track_scores\" : false,\n" + " \"sort\" : [ {\n" + diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java index 7a8d2cd9dbc21..89929985ea594 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java @@ -56,6 +56,8 @@ import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.nestedQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery; import static org.elasticsearch.join.query.JoinQueryBuilders.hasParentQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -66,6 +68,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -133,9 +136,10 @@ public void testSimpleParentChild() throws Exception { assertThat(innerHits.getAt(1).getId(), equalTo("c2")); assertThat(innerHits.getAt(1).getType(), equalTo("doc")); + final boolean seqNoAndTerm = randomBoolean(); response = client().prepareSearch("articles") .setQuery(hasChildQuery("comment", matchQuery("message", "elephant"), ScoreMode.None) - .innerHit(new InnerHitBuilder())) + .innerHit(new InnerHitBuilder().setSeqNoAndPrimaryTerm(seqNoAndTerm))) .get(); assertNoFailures(response); assertHitCount(response, 1); @@ -152,6 +156,22 @@ public void testSimpleParentChild() throws Exception { assertThat(innerHits.getAt(2).getId(), equalTo("c6")); assertThat(innerHits.getAt(2).getType(), equalTo("doc")); + if (seqNoAndTerm) { + assertThat(innerHits.getAt(0).getPrimaryTerm(), equalTo(1L)); + assertThat(innerHits.getAt(1).getPrimaryTerm(), equalTo(1L)); + assertThat(innerHits.getAt(2).getPrimaryTerm(), equalTo(1L)); + assertThat(innerHits.getAt(0).getSeqNo(), greaterThanOrEqualTo(0L)); + assertThat(innerHits.getAt(1).getSeqNo(), greaterThanOrEqualTo(0L)); + assertThat(innerHits.getAt(2).getSeqNo(), greaterThanOrEqualTo(0L)); + } else { + assertThat(innerHits.getAt(0).getPrimaryTerm(), equalTo(UNASSIGNED_PRIMARY_TERM)); + assertThat(innerHits.getAt(1).getPrimaryTerm(), equalTo(UNASSIGNED_PRIMARY_TERM)); + assertThat(innerHits.getAt(2).getPrimaryTerm(), equalTo(UNASSIGNED_PRIMARY_TERM)); + assertThat(innerHits.getAt(0).getSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); + assertThat(innerHits.getAt(1).getSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); + assertThat(innerHits.getAt(2).getSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); + } + response = client().prepareSearch("articles") .setQuery( hasChildQuery("comment", matchQuery("message", "fox"), ScoreMode.None).innerHit( diff --git a/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml b/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml index 7273ac6a95d5e..61af4ab1acb59 100644 --- a/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml +++ b/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml @@ -11,26 +11,26 @@ setup: relations: parent: child ---- -"Parent/child inner hits": - - do: - index: - index: test - type: doc - id: 1 - body: {"foo": "bar", "join_field": {"name" : "parent"} } + - do: + index: + index: test + type: doc + id: 1 + body: {"foo": "bar", "join_field": {"name" : "parent"} } - - do: - index: - index: test - type: doc - id: 2 - routing: 1 - body: {"bar": "baz", "join_field": { "name" : "child", "parent": "1"} } + - do: + index: + index: test + type: doc + id: 2 + routing: 1 + body: {"bar": "baz", "join_field": { "name" : "child", "parent": "1"} } - - do: - indices.refresh: {} + - do: + indices.refresh: {} +--- +"Parent/child inner hits": - do: search: rest_total_hits_as_int: true @@ -41,3 +41,24 @@ setup: - match: { hits.hits.0.inner_hits.child.hits.hits.0._index: "test"} - match: { hits.hits.0.inner_hits.child.hits.hits.0._id: "2" } - is_false: hits.hits.0.inner_hits.child.hits.hits.0._nested + +--- +"Parent/child inner hits with seq no": + - skip: + version: " - 6.99.99" + reason: support was added in 7.0 + + - do: + search: + rest_total_hits_as_int: true + body: { "query" : { "has_child" : + { "type" : "child", "query" : { "match_all" : {} }, "inner_hits" : { "seq_no_primary_term": true} } + } } + - match: { hits.total: 1 } + - match: { hits.hits.0._index: "test" } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0.inner_hits.child.hits.hits.0._index: "test"} + - match: { hits.hits.0.inner_hits.child.hits.hits.0._id: "2" } + - is_false: hits.hits.0.inner_hits.child.hits.hits.0._nested + - gte: { hits.hits.0.inner_hits.child.hits.hits.0._seq_no: 0 } + - gte: { hits.hits.0.inner_hits.child.hits.hits.0._primary_term: 1 } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index 2f801811327b8..0f985fd37016a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -179,7 +179,7 @@ public void testInitialSearchParamsMisc() { fetchVersion = randomBoolean(); searchRequest.source().version(fetchVersion); } - + Map params = initialSearch(searchRequest, query, remoteVersion).getParameters(); if (scroll == null) { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java index 28d32f50bfc69..bc24789341e04 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java @@ -36,12 +36,12 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.InboundMessage; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportSettings; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -111,13 +111,9 @@ public ExceptionThrowingNetty4Transport( } @Override - protected String handleRequest(TcpChannel channel, String profileName, - StreamInput stream, long requestId, int messageLengthBytes, Version version, - InetSocketAddress remoteAddress, byte status) throws IOException { - String action = super.handleRequest(channel, profileName, stream, requestId, messageLengthBytes, version, - remoteAddress, status); + protected void handleRequest(TcpChannel channel, InboundMessage.RequestMessage request, int messageLengthBytes) throws IOException { + super.handleRequest(channel, request, messageLengthBytes); channelProfileName = TransportSettings.DEFAULT_PROFILE; - return action; } @Override diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..927a881df73f1 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +ea440f53a9e858c2ed87927c63d57eb70e5af9ee \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 4d7a031054064..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5b0beeaa95c28e5e0679d684d4e2e30e90cf53e7 \ No newline at end of file diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java index d4cc9ee9d6e89..a228283527d66 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java @@ -46,9 +46,9 @@ import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Iterator; import java.util.List; @@ -208,7 +208,7 @@ public BytesRef parseBytesRef(String value) { }; @Override - public DocValueFormat docValueFormat(final String format, final DateTimeZone timeZone) { + public DocValueFormat docValueFormat(final String format, final ZoneId timeZone) { return COLLATE_FORMAT; } } diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..075bd24f3e609 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +b016fdfb8f1ac413d902cd4d244b295c9f01e610 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 0c588e1607a0d..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fff58bb761b71ded4bf1bfd41bad522df5c67f5c \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..d5046efc95935 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +395c8291751ffa7fbbb96faf578408a33a34ad1d \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 78a9e33ed22e1..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -167a2b06379c4a1233a866ea88df17bb481e7200 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..19a0a9a198ce0 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +6957c71604356a1fbf0e13794595e4ea42126dd7 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 1ea10265dcfef..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -77a1ed8e4544c31b7c949c9f0ddb7bc2a53adfb9 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..91597e3941158 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +09f42235a8daca4ca8ea604314c2eb9de51b9e98 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 0e69798085eb1..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49b825fc84de3f993eb161d1a38fdeefa9b5511a \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..a868f7d5a053f --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +7cf7eeb685a2060e97f50551236cfcaf39990083 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 85d83e7674670..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c60a0b708cd1c61ec34191df1bcf99b9211c08f \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..4f5f80aa96b91 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +ff163fb06ec3e47d501b290d8f69cca88a0341cc \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index eab26671dcaa3..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -553be14e6c3bb82b7e70f76ce2d294e4fa26fc20 \ No newline at end of file diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index 9e42799b4da03..d68a9ee397645 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -32,7 +32,7 @@ dependencies { } if (System.getProperty('tests.distribution') == null) { - integTestCluster.distribution = 'oss-zip' + integTestCluster.distribution = 'oss' } unitTest.enabled = false diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 58dddb142c0dd..541e6dc42f553 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -169,6 +169,90 @@ buildscript { } } +private static int freePort(String minioAddress) { + int minioPort + ServerSocket serverSocket = new ServerSocket(0, 1, InetAddress.getByName(minioAddress)) + try { + minioPort = serverSocket.localPort + } finally { + serverSocket.close() + } + if (minioPort == 0) { + throw new GradleException("Could not find a free port for Minio") + } + return minioPort +} + +private int getMinioPid(Process minioProcess) { + int minioPid + if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { + try { + Class cProcessImpl = minioProcess.getClass() + Field fPid = cProcessImpl.getDeclaredField("pid") + if (!fPid.isAccessible()) { + fPid.setAccessible(true) + } + minioPid = fPid.getInt(minioProcess) + } catch (Exception e) { + logger.error("failed to read pid from minio process", e) + minioProcess.destroyForcibly() + throw e + } + } else { + minioPid = minioProcess.pid() + } + return minioPid +} + +private static Process setupMinio(String minioAddress, int minioPort, String minioDataDir, String accessKey, String secretKey, + String minioBinDir, String minioFileName) { + // we skip these tests on Windows so we do no need to worry about compatibility here + final ProcessBuilder minio = new ProcessBuilder( + "${minioBinDir}/${minioFileName}", + "server", + "--address", + minioAddress + ":" + minioPort, + minioDataDir) + minio.environment().put('MINIO_ACCESS_KEY', accessKey) + minio.environment().put('MINIO_SECRET_KEY', secretKey) + return minio.start() +} + +private void addShutdownHook(Process minioProcess, int minioPort, int minioPid) { + new BufferedReader(new InputStreamReader(minioProcess.inputStream)).withReader { br -> + String line + int httpPort = 0 + while ((line = br.readLine()) != null) { + logger.info(line) + if (line.matches('.*Endpoint.*:\\d+$')) { + assert httpPort == 0 + final int index = line.lastIndexOf(":") + assert index >= 0 + httpPort = Integer.parseInt(line.substring(index + 1)) + if (httpPort != minioPort) { + throw new IllegalStateException("Port mismatch, expected ${minioPort} but was ${httpPort}") + } + + final File script = new File(project.buildDir, "minio/minio.killer.sh") + script.setText( + ["function shutdown {", + " kill ${minioPid}", + "}", + "trap shutdown EXIT", + // will wait indefinitely for input, but we never pass input, and the pipe is only closed when the build dies + "read line\n"].join('\n'), 'UTF-8') + final ProcessBuilder killer = new ProcessBuilder("bash", script.absolutePath) + killer.start() + break + } + } + + if (httpPort <= 0) { + throw new IllegalStateException("httpPort must be > 0") + } + } +} + if (useFixture && minioDistribution) { apply plugin: 'de.undercouch.download' @@ -201,72 +285,28 @@ if (useFixture && minioDistribution) { ext.minioPort = 0 doLast { - // get free port - ServerSocket serverSocket = new ServerSocket(0, 1, InetAddress.getByName(minioAddress)) - try { - minioPort = serverSocket.localPort - } finally { - serverSocket.close() - } - if (minioPort == 0) { - throw new GradleException("Could not find a free port for Minio") - } - new File("${minioDataDir}/${s3PermanentBucket}").mkdirs() - // we skip these tests on Windows so we do no need to worry about compatibility here - final ProcessBuilder minio = new ProcessBuilder( - "${minioBinDir}/${minioFileName}", - "server", - "--address", - minioAddress + ":" + minioPort, - minioDataDir) - minio.environment().put('MINIO_ACCESS_KEY', s3PermanentAccessKey) - minio.environment().put('MINIO_SECRET_KEY', s3PermanentSecretKey) - final Process process = minio.start() - if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { + + Exception accumulatedException = null + for (int i = 0; i < 5; ++i) { try { - Class cProcessImpl = process.getClass() - Field fPid = cProcessImpl.getDeclaredField("pid") - if (!fPid.isAccessible()) { - fPid.setAccessible(true) - } - minioPid = fPid.getInt(process) + minioPort = freePort(minioAddress) + final Process process = + setupMinio(minioAddress, minioPort, minioDataDir, s3PermanentAccessKey, s3PermanentSecretKey, minioBinDir, minioFileName) + minioPid = getMinioPid(process) + addShutdownHook(process, minioPort, minioPid) + break } catch (Exception e) { - logger.error("failed to read pid from minio process", e) - process.destroyForcibly() - throw e - } - } else { - minioPid = process.pid() - } - - new BufferedReader(new InputStreamReader(process.getInputStream())).withReader { br -> - String line - int httpPort = 0 - while ((line = br.readLine()) != null) { - logger.info(line) - if (line.matches('.*Endpoint.*:\\d+$')) { - assert httpPort == 0 - final int index = line.lastIndexOf(":") - assert index >= 0 - httpPort = Integer.parseInt(line.substring(index + 1)) - assert httpPort == minioPort : "Port mismatch, expected ${minioPort} but was ${httpPort}" - - final File script = new File(project.buildDir, "minio/minio.killer.sh") - script.setText( - ["function shutdown {", - " kill ${minioPid}", - "}", - "trap shutdown EXIT", - // will wait indefinitely for input, but we never pass input, and the pipe is only closed when the build dies - "read line\n"].join('\n'), 'UTF-8') - final ProcessBuilder killer = new ProcessBuilder("bash", script.absolutePath) - killer.start() - break + logger.error("Exception while trying to start Minio {}", e) + if (accumulatedException == null) { + accumulated = e + } else { + accumulatedException.addSuppressed(e) } } - - assert httpPort > 0 + } + if (accumulatedException != null) { + throw new GradleException("Failed to start Minio", accumulatedException) } } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java index aa77a189cb78e..a5f274c7ccd34 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -26,13 +26,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.AbstractHttpServerTransport; import org.elasticsearch.http.HttpChannel; @@ -41,7 +39,6 @@ import org.elasticsearch.http.nio.cors.NioCorsConfigBuilder; import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.ChannelFactory; -import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSelector; @@ -50,6 +47,7 @@ import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.nio.NioGroupFactory; import java.io.IOException; import java.net.InetSocketAddress; @@ -61,8 +59,6 @@ import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; -import static org.elasticsearch.common.settings.Setting.intSetting; -import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; @@ -83,15 +79,9 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { private static final Logger logger = LogManager.getLogger(NioHttpServerTransport.class); - public static final Setting NIO_HTTP_ACCEPTOR_COUNT = - intSetting("http.nio.acceptor_count", 1, 1, Setting.Property.NodeScope); - public static final Setting NIO_HTTP_WORKER_COUNT = - new Setting<>("http.nio.worker_count", - (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), - (s) -> Setting.parseInt(s, 1, "http.nio.worker_count"), Setting.Property.NodeScope); - protected final PageCacheRecycler pageCacheRecycler; protected final NioCorsConfig corsConfig; + private final NioGroupFactory nioGroupFactory; protected final boolean tcpNoDelay; protected final boolean tcpKeepAlive; @@ -99,14 +89,15 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { protected final int tcpSendBufferSize; protected final int tcpReceiveBufferSize; - private NioGroup nioGroup; + private volatile NioGroup nioGroup; private ChannelFactory channelFactory; public NioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, PageCacheRecycler pageCacheRecycler, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, - Dispatcher dispatcher) { + Dispatcher dispatcher, NioGroupFactory nioGroupFactory) { super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher); this.pageCacheRecycler = pageCacheRecycler; + this.nioGroupFactory = nioGroupFactory; ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); @@ -134,11 +125,7 @@ public Logger getLogger() { protected void doStart() { boolean success = false; try { - int acceptorCount = NIO_HTTP_ACCEPTOR_COUNT.get(settings); - int workerCount = NIO_HTTP_WORKER_COUNT.get(settings); - nioGroup = new NioGroup(daemonThreadFactory(this.settings, HTTP_SERVER_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, - daemonThreadFactory(this.settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX), workerCount, - (s) -> new EventHandler(this::onNonChannelException, s)); + nioGroup = nioGroupFactory.getHttpGroup(); channelFactory = channelFactory(); bindServer(); success = true; diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioGroupFactory.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioGroupFactory.java new file mode 100644 index 0000000000000..0c17c1d8b85e5 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioGroupFactory.java @@ -0,0 +1,172 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.nio.ChannelFactory; +import org.elasticsearch.nio.EventHandler; +import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioSelectorGroup; +import org.elasticsearch.nio.NioServerSocketChannel; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.transport.TcpTransport; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.InetSocketAddress; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; + +/** + * Creates and returns {@link NioSelectorGroup} instances. It will return a shared group for + * both {@link #getHttpGroup()} and {@link #getTransportGroup()} if + * {@link NioTransportPlugin#NIO_HTTP_WORKER_COUNT} is configured to be 0. If that setting is not 0, then it + * will return a different group in the {@link #getHttpGroup()} call. + */ +public final class NioGroupFactory { + + private final Logger logger; + private final Settings settings; + private final int httpWorkerCount; + + private RefCountedNioGroup refCountedGroup; + + public NioGroupFactory(Settings settings, Logger logger) { + this.logger = logger; + this.settings = settings; + this.httpWorkerCount = NioTransportPlugin.NIO_HTTP_WORKER_COUNT.get(settings); + } + + public Settings getSettings() { + return settings; + } + + public synchronized NioGroup getTransportGroup() throws IOException { + return getGenericGroup(); + } + + public synchronized NioGroup getHttpGroup() throws IOException { + if (httpWorkerCount == 0) { + return getGenericGroup(); + } else { + return new NioSelectorGroup(daemonThreadFactory(this.settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX), + httpWorkerCount, (s) -> new EventHandler(this::onException, s)); + } + } + + private NioGroup getGenericGroup() throws IOException { + if (refCountedGroup == null) { + ThreadFactory threadFactory = daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX); + NioSelectorGroup nioGroup = new NioSelectorGroup(threadFactory, NioTransportPlugin.NIO_WORKER_COUNT.get(settings), + (s) -> new EventHandler(this::onException, s)); + this.refCountedGroup = new RefCountedNioGroup(nioGroup); + return new WrappedNioGroup(refCountedGroup); + } else { + refCountedGroup.incRef(); + return new WrappedNioGroup(refCountedGroup); + } + } + + private void onException(Exception exception) { + logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), + exception); + } + + private static class RefCountedNioGroup extends AbstractRefCounted implements NioGroup { + + public static final String NAME = "ref-counted-nio-group"; + private final NioSelectorGroup nioGroup; + + private RefCountedNioGroup(NioSelectorGroup nioGroup) { + super(NAME); + this.nioGroup = nioGroup; + } + + @Override + protected void closeInternal() { + try { + nioGroup.close(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Override + public S bindServerChannel(InetSocketAddress address, ChannelFactory factory) + throws IOException { + return nioGroup.bindServerChannel(address, factory); + } + + @Override + public S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException { + return nioGroup.openChannel(address, factory); + } + + @Override + public void close() throws IOException { + throw new UnsupportedOperationException("Should not close. Instead use decRef call."); + } + } + + /** + * Wraps the {@link RefCountedNioGroup}. Calls {@link RefCountedNioGroup#decRef()} on close. After close, + * this wrapped instance can no longer be used. + */ + private class WrappedNioGroup implements NioGroup { + + private final RefCountedNioGroup refCountedNioGroup; + + private final AtomicBoolean isOpen = new AtomicBoolean(true); + + private WrappedNioGroup(RefCountedNioGroup refCountedNioGroup) { + this.refCountedNioGroup = refCountedNioGroup; + } + + public S bindServerChannel(InetSocketAddress address, ChannelFactory factory) + throws IOException { + ensureOpen(); + return refCountedNioGroup.bindServerChannel(address, factory); + } + + public S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException { + ensureOpen(); + return refCountedNioGroup.openChannel(address, factory); + } + + @Override + public void close() { + if (isOpen.compareAndSet(true, false)) { + refCountedNioGroup.decRef(); + } + } + + private void ensureOpen() { + if (isOpen.get() == false) { + throw new IllegalStateException("NioGroup is closed."); + } + } + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index c5f5b414c07c9..a9a1c716ef82a 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -27,14 +27,11 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.ChannelFactory; -import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSelector; @@ -54,25 +51,21 @@ import java.util.function.Supplier; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; -import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; public class NioTransport extends TcpTransport { private static final Logger logger = LogManager.getLogger(NioTransport.class); - public static final Setting NIO_WORKER_COUNT = - new Setting<>("transport.nio.worker_count", - (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), - (s) -> Setting.parseInt(s, 1, "transport.nio.worker_count"), Setting.Property.NodeScope); - private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); + private final NioGroupFactory groupFactory; private volatile NioGroup nioGroup; private volatile Function clientChannelFactory; protected NioTransport(Settings settings, Version version, ThreadPool threadPool, NetworkService networkService, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, - CircuitBreakerService circuitBreakerService) { + CircuitBreakerService circuitBreakerService, NioGroupFactory groupFactory) { super("nio", settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + this.groupFactory = groupFactory; } @Override @@ -91,8 +84,7 @@ protected NioTcpChannel initiateChannel(DiscoveryNode node) throws IOException { protected void doStart() { boolean success = false; try { - nioGroup = new NioGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), - NioTransport.NIO_WORKER_COUNT.get(settings), (s) -> new EventHandler(this::onNonChannelException, s)); + nioGroup = groupFactory.getTransportGroup(); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); clientChannelFactory = clientChannelFactoryFunction(clientProfileSettings); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java index 9c73cbc5a662a..647b53ad47a59 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java @@ -19,6 +19,9 @@ package org.elasticsearch.transport.nio; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -26,6 +29,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.nio.NioHttpServerTransport; @@ -41,17 +45,29 @@ import java.util.Map; import java.util.function.Supplier; +import static org.elasticsearch.common.settings.Setting.intSetting; + public class NioTransportPlugin extends Plugin implements NetworkPlugin { public static final String NIO_TRANSPORT_NAME = "nio-transport"; public static final String NIO_HTTP_TRANSPORT_NAME = "nio-http-transport"; + private static final Logger logger = LogManager.getLogger(NioTransportPlugin.class); + + public static final Setting NIO_WORKER_COUNT = + new Setting<>("transport.nio.worker_count", + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), + (s) -> Setting.parseInt(s, 1, "transport.nio.worker_count"), Setting.Property.NodeScope); + public static final Setting NIO_HTTP_WORKER_COUNT = + intSetting("http.nio.worker_count", 0, 0, Setting.Property.NodeScope); + + private final SetOnce groupFactory = new SetOnce<>(); + @Override public List> getSettings() { return Arrays.asList( - NioHttpServerTransport.NIO_HTTP_ACCEPTOR_COUNT, - NioHttpServerTransport.NIO_HTTP_WORKER_COUNT, - NioTransport.NIO_WORKER_COUNT + NIO_HTTP_WORKER_COUNT, + NIO_WORKER_COUNT ); } @@ -61,7 +77,7 @@ public Map> getTransports(Settings settings, ThreadP NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { return Collections.singletonMap(NIO_TRANSPORT_NAME, () -> new NioTransport(settings, Version.CURRENT, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, - circuitBreakerService)); + circuitBreakerService, getNioGroupFactory(settings))); } @Override @@ -73,6 +89,17 @@ public Map> getHttpTransports(Settings set HttpServerTransport.Dispatcher dispatcher) { return Collections.singletonMap(NIO_HTTP_TRANSPORT_NAME, () -> new NioHttpServerTransport(settings, networkService, bigArrays, pageCacheRecycler, threadPool, xContentRegistry, - dispatcher)); + dispatcher, getNioGroupFactory(settings))); + } + + private synchronized NioGroupFactory getNioGroupFactory(Settings settings) { + NioGroupFactory nioGroupFactory = groupFactory.get(); + if (nioGroupFactory != null) { + assert nioGroupFactory.getSettings().equals(settings) : "Different settings than originally provided"; + return nioGroupFactory; + } else { + groupFactory.set(new NioGroupFactory(settings, logger)); + return groupFactory.get(); + } } } diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java index 703f7acbf8257..9ed64213cbf5f 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java @@ -20,10 +20,8 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.http.nio.NioHttpServerTransport; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.transport.nio.NioTransport; import org.elasticsearch.transport.nio.NioTransportPlugin; import java.util.Collection; @@ -46,8 +44,8 @@ protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); // randomize nio settings if (randomBoolean()) { - builder.put(NioTransport.NIO_WORKER_COUNT.getKey(), random().nextInt(3) + 1); - builder.put(NioHttpServerTransport.NIO_HTTP_WORKER_COUNT.getKey(), random().nextInt(3) + 1); + builder.put(NioTransportPlugin.NIO_WORKER_COUNT.getKey(), random().nextInt(3) + 1); + builder.put(NioTransportPlugin.NIO_HTTP_WORKER_COUNT.getKey(), random().nextInt(3) + 1); } builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME); builder.put(NetworkModule.HTTP_TYPE_KEY, NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java index b49d5b5186628..e3259b10b9745 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java @@ -45,7 +45,7 @@ import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; -import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioSelectorGroup; import org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; @@ -88,11 +88,11 @@ static Collection returnOpaqueIds(Collection responses private static final Logger logger = LogManager.getLogger(NioHttpClient.class); - private final NioGroup nioGroup; + private final NioSelectorGroup nioGroup; NioHttpClient() { try { - nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "nio-http-client"), 1, + nioGroup = new NioSelectorGroup(daemonThreadFactory(Settings.EMPTY, "nio-http-client"), 1, (s) -> new EventHandler(this::onException, s)); } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java index 13b8e60336e02..2ffd5a64147cc 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.junit.After; import org.junit.Before; @@ -202,7 +203,7 @@ public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadC } }; try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, threadPool, - xContentRegistry(), dispatcher)) { + xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger))) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); try (NioHttpClient client = new NioHttpClient()) { @@ -235,12 +236,12 @@ public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadC public void testBindUnavailableAddress() { try (NioHttpServerTransport transport = new NioHttpServerTransport(Settings.EMPTY, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), new NullDispatcher())) { + threadPool, xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger))) { transport.start(); TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build(); try (NioHttpServerTransport otherTransport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), new NullDispatcher())) { + threadPool, xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger))) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); } @@ -284,7 +285,7 @@ public void dispatchBadRequest(final RestRequest request, } try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), dispatcher)) { + threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger))) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioGroupFactoryTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioGroupFactoryTests.java new file mode 100644 index 0000000000000..356eb39b73449 --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioGroupFactoryTests.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.nio.ChannelFactory; +import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioSelector; +import org.elasticsearch.nio.NioServerSocketChannel; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.ServerChannelContext; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.function.Consumer; + +public class NioGroupFactoryTests extends ESTestCase { + + public void testSharedGroupStillWorksWhenOneInstanceClosed() throws IOException { + NioGroupFactory groupFactory = new NioGroupFactory(Settings.EMPTY, logger); + + InetSocketAddress inetSocketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); + NioGroup httpGroup = groupFactory.getHttpGroup(); + try { + NioGroup transportGroup = groupFactory.getTransportGroup(); + transportGroup.close(); + expectThrows(IllegalStateException.class, () -> transportGroup.bindServerChannel(inetSocketAddress, new BindingFactory())); + + httpGroup.bindServerChannel(inetSocketAddress, new BindingFactory()); + } finally { + httpGroup.close(); + } + expectThrows(IllegalStateException.class, () -> httpGroup.bindServerChannel(inetSocketAddress, new BindingFactory())); + } + + private static class BindingFactory extends ChannelFactory { + + private BindingFactory() { + super(new ChannelFactory.RawChannelFactory(false, false, false, -1, -1)); + } + + @Override + public NioSocketChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { + throw new IOException("boom"); + } + + @Override + public NioServerSocketChannel createServerChannel(NioSelector selector, ServerSocketChannel channel) throws IOException { + NioServerSocketChannel nioChannel = new NioServerSocketChannel(channel); + Consumer exceptionHandler = (e) -> {}; + Consumer acceptor = (c) -> {}; + ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, acceptor, exceptionHandler); + nioChannel.setContext(context); + return nioChannel; + } + } +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java index 61f07aa016207..087c3758bb98b 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.transport.nio; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.NioIntegTestCase; import org.elasticsearch.Version; @@ -36,12 +38,12 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.InboundMessage; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportSettings; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -54,6 +56,7 @@ @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) public class NioTransportIT extends NioIntegTestCase { + // static so we can use it in anonymous classes private static String channelProfileName = null; @@ -86,6 +89,8 @@ public void testThatConnectionFailsAsIntended() throws Exception { public static final class ExceptionThrowingNioTransport extends NioTransport { + private static final Logger logger = LogManager.getLogger(ExceptionThrowingNioTransport.class); + public static class TestPlugin extends Plugin implements NetworkPlugin { @Override @@ -103,17 +108,14 @@ public Map> getTransports(Settings settings, ThreadP ExceptionThrowingNioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { - super(settings, Version.CURRENT, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService); + super(settings, Version.CURRENT, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, + new NioGroupFactory(settings, logger)); } @Override - protected String handleRequest(TcpChannel channel, String profileName, - StreamInput stream, long requestId, int messageLengthBytes, Version version, - InetSocketAddress remoteAddress, byte status) throws IOException { - String action = super.handleRequest(channel, profileName, stream, requestId, messageLengthBytes, version, - remoteAddress, status); + protected void handleRequest(TcpChannel channel, InboundMessage.RequestMessage request, int messageLengthBytes) throws IOException { + super.handleRequest(channel, request, messageLengthBytes); channelProfileName = TransportSettings.DEFAULT_PROFILE; - return action; } @Override diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java index 82a991036270d..a78e924298a9e 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java @@ -52,12 +52,12 @@ public class SimpleNioTransportTests extends AbstractSimpleTransportTestCase { - public static MockTransportService nioFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, + public MockTransportService nioFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); NetworkService networkService = new NetworkService(Collections.emptyList()); Transport transport = new NioTransport(settings, version, threadPool, networkService, new MockPageCacheRecycler(settings), - namedWriteableRegistry, new NoneCircuitBreakerService()) { + namedWriteableRegistry, new NoneCircuitBreakerService(), new NioGroupFactory(settings, logger)) { @Override public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionProfile profile, diff --git a/qa/build.gradle b/qa/build.gradle index 0336b947d06aa..cbcb1d4580704 100644 --- a/qa/build.gradle +++ b/qa/build.gradle @@ -4,8 +4,8 @@ import org.elasticsearch.gradle.test.RestIntegTestTask subprojects { Project subproj -> subproj.tasks.withType(RestIntegTestTask) { subproj.extensions.configure("${it.name}Cluster") { cluster -> - cluster.distribution = System.getProperty('tests.distribution', 'oss-zip') - if (cluster.distribution == 'zip') { + cluster.distribution = System.getProperty('tests.distribution', 'oss') + if (cluster.distribution == 'default') { /* * Add Elastic's repositories so we can resolve older versions of the * default distribution. Those aren't in maven central. diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java index 64cec9224965b..d118df9abde47 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java @@ -219,6 +219,7 @@ public void testExecutionExceptionOnAutoQueueFixedESThreadPoolExecutor() throws } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37708") public void testExecutionExceptionOnSinglePrioritizingThreadPoolExecutor() throws InterruptedException { final PrioritizedEsThreadPoolExecutor prioritizedExecutor = EsExecutors.newSinglePrioritizing("test", EsExecutors.daemonThreadFactory("test"), threadPool.getThreadContext(), threadPool.scheduler()); diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 70afa9bf917e8..5acf84139bbf4 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -30,6 +30,7 @@ rest_total_hits_as_int: true index: test_index,my_remote_cluster:test_index body: + seq_no_primary_term: true aggs: cluster: terms: @@ -37,6 +38,8 @@ - match: { _shards.total: 5 } - match: { hits.total: 11 } + - gte: { hits.hits.0._seq_no: 0 } + - gte: { hits.hits.0._primary_term: 1 } - length: { aggregations.cluster.buckets: 2 } - match: { aggregations.cluster.buckets.0.key: "remote_cluster" } - match: { aggregations.cluster.buckets.0.doc_count: 6 } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java index 1704deada1762..ba1cd80fc88a3 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java @@ -30,13 +30,13 @@ /** * Basic tests for simple xpack functionality that are only run if the - * cluster is the on the "zip" distribution. + * cluster is the on the default distribution. */ public class XPackIT extends AbstractRollingTestCase { @Before public void skipIfNotXPack() { assumeThat("test is only supported if the distribution contains xpack", - System.getProperty("tests.distribution"), equalTo("zip")); + System.getProperty("tests.distribution"), equalTo("default")); assumeThat("running this on the unupgraded cluster would change its state and it wouldn't work prior to 6.3 anyway", CLUSTER_TYPE, equalTo(ClusterType.UPGRADED)); /* diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml index 0f8b5517dd4d2..ea0984ef3bcbf 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml @@ -9,7 +9,7 @@ index: timetest body: mappings: - test: { "properties": { "my_time": {"type": "date"}}} + test: { "properties": { "my_time": {"type": "date", "format": "strict_date_optional_time_nanos"}}} - do: ingest.put_pipeline: diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 9d299e16f0210..cd64262fcdce1 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -29,7 +29,7 @@ integTestCluster { } integTestRunner { - if ('zip'.equals(integTestCluster.distribution)) { + if ('default'.equals(integTestCluster.distribution)) { systemProperty 'tests.rest.blacklist', [ 'cat.templates/10_basic/No templates', 'cat.templates/10_basic/Sort templates', diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 5834ca623a99b..9ac02b1214a2f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -164,6 +164,10 @@ "type" : "boolean", "description" : "Specify whether to return document version as part of a hit" }, + "seq_no_primary_term": { + "type" : "boolean", + "description" : "Specify whether to return sequence number and primary term of the last modification of each hit" + }, "request_cache": { "type" : "boolean", "description" : "Specify if request cache should be used for this request or not, defaults to index level setting" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index d4b04ce25110b..a9b6639359e24 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -316,6 +316,8 @@ Supports also regular expressions with flag X for more readability (accepts whit \d+ \s+ \d{2}:\d{2}:\d{2} \s+ \d+ \s+ \n $/ .... +**Note:** `$body` is used to refer to the last obtained response body as a string, while `''` refers to the parsed representation (parsed into a Map by the Java runner for instance). Having the raw string response is for example useful when testing cat APIs. + === `lt` and `gt` Compares two numeric values, eg: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml index 267a9e85d1d5d..775475e01a597 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml @@ -1,8 +1,4 @@ ---- -"top_hits aggregation with nested documents": - - skip: - version: " - 6.1.99" - reason: "<= 6.1 nodes don't always include index or id in nested top hits" +setup: - do: indices.create: index: my-index @@ -54,6 +50,12 @@ ] } +--- +"top_hits aggregation with nested documents": + - skip: + version: " - 6.1.99" + reason: "<= 6.1 nodes don't always include index or id in nested top hits" + - do: search: rest_total_hits_as_int: true @@ -81,3 +83,35 @@ - match: { aggregations.to-users.users.hits.hits.2._index: my-index } - match: { aggregations.to-users.users.hits.hits.2._nested.field: users } - match: { aggregations.to-users.users.hits.hits.2._nested.offset: 1 } + + +--- +"top_hits aggregation with sequence numbers": + - skip: + version: " - 6.99.99" + reason: support was added in 7.0 + + - do: + search: + rest_total_hits_as_int: true + body: + aggs: + groups: + terms: + field: group.keyword + aggs: + users: + top_hits: + sort: "users.last.keyword" + seq_no_primary_term: true + + - match: { hits.total: 2 } + - length: { aggregations.groups.buckets.0.users.hits.hits: 2 } + - match: { aggregations.groups.buckets.0.users.hits.hits.0._id: "1" } + - match: { aggregations.groups.buckets.0.users.hits.hits.0._index: my-index } + - gte: { aggregations.groups.buckets.0.users.hits.hits.0._seq_no: 0 } + - gte: { aggregations.groups.buckets.0.users.hits.hits.0._primary_term: 1 } + - match: { aggregations.groups.buckets.0.users.hits.hits.1._id: "2" } + - match: { aggregations.groups.buckets.0.users.hits.hits.1._index: my-index } + - gte: { aggregations.groups.buckets.0.users.hits.hits.1._seq_no: 0 } + - gte: { aggregations.groups.buckets.0.users.hits.hits.1._primary_term: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index 7ba1d9b62fbc7..a85abb4e6e28b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -405,3 +405,57 @@ setup: - match: { hits.hits.1.inner_hits.sub_hits.hits.total: 3} - match: { hits.hits.2.fields.group_alias: [25] } - match: { hits.hits.2.inner_hits.sub_hits.hits.total: 2} + +--- +"field collapsing, inner_hits and seq_no": + + - skip: + version: " - 6.99.0" + reason: "sequence numbers introduced in 7.0.0" + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + collapse: { field: numeric_group, inner_hits: { + name: sub_hits, seq_no_primary_term: true, size: 2, sort: [{ sort: asc }] + } } + sort: [{ sort: desc }] + + - match: { hits.total: 6 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0.fields.numeric_group: [3] } + - match: { hits.hits.0.sort: [36] } + - match: { hits.hits.0._id: "6" } + - match: { hits.hits.0.inner_hits.sub_hits.hits.total: 1 } + - length: { hits.hits.0.inner_hits.sub_hits.hits.hits: 1 } + - match: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._id: "6" } + - gte: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._seq_no: 0 } + - gte: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._primary_term: 1 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1.fields.numeric_group: [1] } + - match: { hits.hits.1.sort: [24] } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.1.inner_hits.sub_hits.hits.total: 3 } + - length: { hits.hits.1.inner_hits.sub_hits.hits.hits: 2 } + - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.0._id: "2" } + - gte: { hits.hits.1.inner_hits.sub_hits.hits.hits.0._seq_no: 0 } + - gte: { hits.hits.1.inner_hits.sub_hits.hits.hits.0._primary_term: 1 } + - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._id: "1" } + - gte: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._seq_no: 0 } + - gte: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._primary_term: 1 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._type: test } + - match: { hits.hits.2.fields.numeric_group: [25] } + - match: { hits.hits.2.sort: [10] } + - match: { hits.hits.2._id: "4" } + - match: { hits.hits.2.inner_hits.sub_hits.hits.total: 2 } + - length: { hits.hits.2.inner_hits.sub_hits.hits.hits: 2 } + - match: { hits.hits.2.inner_hits.sub_hits.hits.hits.0._id: "5" } + - gte: { hits.hits.2.inner_hits.sub_hits.hits.hits.0._seq_no: 0 } + - gte: { hits.hits.2.inner_hits.sub_hits.hits.hits.0._primary_term: 1 } + - match: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._id: "4" } + - gte: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._seq_no: 0 } + - gte: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._primary_term: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_local_dependent_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml similarity index 100% rename from rest-api-spec/src/main/resources/rest-api-spec/test/search/180_local_dependent_mapping.yml rename to rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml new file mode 100644 index 0000000000000..9e838d1c58f77 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml @@ -0,0 +1,74 @@ +setup: + - do: + indices.create: + index: test_1 + + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: foo } + +## we index again in order to make the seq# 1 (so we can check for the field existence with is_false) + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + + - do: + indices.refresh: + index: [test_1] + +--- +"sequence numbers are returned if requested from body": + - skip: + version: " - 6.99.99" + reason: sequence numbers were added in 7.0.0 + + - do: + search: + index: _all + body: + query: + match: + foo: bar + seq_no_primary_term: true + + - match: {hits.total.value: 1} + - match: {hits.hits.0._seq_no: 1} + - gte: {hits.hits.0._primary_term: 1} + +--- +"sequence numbers are returned if requested from url": + - skip: + version: " - 6.99.99" + reason: sequence numbers were added in 7.0.0 + + - do: + search: + index: _all + body: + query: + match: + foo: bar + seq_no_primary_term: true + + - match: {hits.total.value: 1} + - match: {hits.hits.0._seq_no: 1} + - gte: {hits.hits.0._primary_term: 1} + +--- +"sequence numbers are not returned if not requested": + - do: + search: + index: _all + body: + query: + match: + foo: bar + + - is_false: hits.hits.0._seq_no + - is_false: hits.hits.0._primary_term diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..4dc7f4b4ca6bf --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +f4d286ed7940fc206331424e8ce577584208eba3 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index be6bfec6e5563..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66e4d1a3f91be88d903b1c75e71c8b15a7dc4f90 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..df0783e11fee6 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +e5416da7370f5def9a79fb1cccb091b091b808a5 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 357ce92760e39..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -069cfb0c693d365cbf332973ab796ba33646f867 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..7a4b68f8d857d --- /dev/null +++ b/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +e3a95ff3cbd96e2c05b90932c20ca6374cdcdbe9 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index efbb9ada534a5..0000000000000 --- a/server/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad32720fe677becb93a26692338b63754613aa50 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..e1933653b9f4d --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +045dcae6368a436cec6e56f654a9c6b3a4656d17 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 7f35ad20b8230..0000000000000 --- a/server/licenses/lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f9499ffc5e956f7a113308198e74e80b7df290b \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..85aff2f06dd49 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +63b7164d24547cea36df3d81eedb9e8b424cf3c2 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 566114f72948f..0000000000000 --- a/server/licenses/lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -162cac09191f267039cdb73ccc7face5ef54ba8b \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..41b3910da1319 --- /dev/null +++ b/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +7b3358889c491237709e8aea4a39e816227f3d26 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index baa09c01bed13..0000000000000 --- a/server/licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -106cbbc96feb7413b17539f8a0ae2e7692550f44 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..22318a34bf09e --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +5d9566e2b3be41c81fe42df1c57fff31fcdbc565 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index ba3d8ec94d0ac..0000000000000 --- a/server/licenses/lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -37336dec582ce1569e944d1d8a5181c2eb2aec25 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..2c4429c8c7619 --- /dev/null +++ b/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +45574e22c4b1569a2aecd5b2837388c7287892b5 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 764fffadffe99..0000000000000 --- a/server/licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c18617a95c109160d0dacb58a1e268014f7f5862 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..ae11e5858a28c --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +67405660330db8c09e0994a615cf3ab95d7bc151 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index e50a454a0ebd9..0000000000000 --- a/server/licenses/lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab86036efd74fc41730e20dd2d3de977297878e0 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..fcb52584b667c --- /dev/null +++ b/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +c8de64bf2c4f09766d4cd62e8dd403016665f37c \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 8b2d290d647ed..0000000000000 --- a/server/licenses/lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02f3f472494f250da6fe7199de6c2f2ef5972774 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..80750b2d41e33 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +fc080b45b881a78a23743df9a7388fd3dbbb9d66 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 2466df430847f..0000000000000 --- a/server/licenses/lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da0a248a2bb69499715411b682d45adaea5ab499 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..32f5ca7196e17 --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +6e0e12b9882005b87ef39325e1fc8539c8caff31 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index c8b8762f25a92..0000000000000 --- a/server/licenses/lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8ed0aad4c4214d0fe3571dfa2d09c936a91cf3c7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..2cf474908d1ba --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +7ed9c9a03e1a15840237d09ca4da7aadfce1c780 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 17b2f3ef4a33c..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9105a1c73feeb836ca4244367f02d6a8d7e3cc27 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..c4016fb692e2f --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +fdf0efdeb9a73a6b0f8349df21049ecbc73955d7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 62d3f24344e33..0000000000000 --- a/server/licenses/lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6ebc95227d4415cc6d345c1dd3759e1f348e0ca4 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..7ea28b0ed87da --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +c69399183d5f9f85f3f8130452d0bed62fd92440 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 8471aff350b83..0000000000000 --- a/server/licenses/lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c4cf1b911521f962c9bd2d28efb519bf9e1b88f4 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 530c5ce4f6396..1fc5ef474ee04 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1009,10 +1009,8 @@ private enum ElasticsearchExceptionHandle { MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_7_0_0), COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, Version.V_7_0_0), - CLUSTER_ALREADY_BOOTSTRAPPED_EXCEPTION(org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException.class, - org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException::new, 151, Version.V_7_0_0), SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class, - org.elasticsearch.snapshots.SnapshotInProgressException::new, 152, Version.V_7_0_0); + org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, Version.V_7_0_0); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index a3d3c615162dc..142aa6bde74b6 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -23,10 +23,6 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterAction; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesAction; -import org.elasticsearch.action.admin.cluster.bootstrap.TransportBootstrapClusterAction; -import org.elasticsearch.action.admin.cluster.bootstrap.TransportGetDiscoveredNodesAction; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; @@ -433,8 +429,6 @@ public void reg actions.register(GetTaskAction.INSTANCE, TransportGetTaskAction.class); actions.register(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class); - actions.register(GetDiscoveredNodesAction.INSTANCE, TransportGetDiscoveredNodesAction.class); - actions.register(BootstrapClusterAction.INSTANCE, TransportBootstrapClusterAction.class); actions.register(AddVotingConfigExclusionsAction.INSTANCE, TransportAddVotingConfigExclusionsAction.class); actions.register(ClearVotingConfigExclusionsAction.INSTANCE, TransportClearVotingConfigExclusionsAction.class); actions.register(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterAction.java deleted file mode 100644 index 28a8e580cedc4..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterAction.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.Action; -import org.elasticsearch.common.io.stream.Writeable.Reader; - -public class BootstrapClusterAction extends Action { - public static final BootstrapClusterAction INSTANCE = new BootstrapClusterAction(); - public static final String NAME = "cluster:admin/bootstrap/set_voting_config"; - - private BootstrapClusterAction() { - super(NAME); - } - - @Override - public BootstrapClusterResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public Reader getResponseReader() { - return BootstrapClusterResponse::new; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequest.java deleted file mode 100644 index f8d0bcb13a58f..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Request to set the initial configuration of master-eligible nodes in a cluster so that the very first master election can take place. - */ -public class BootstrapClusterRequest extends ActionRequest { - private final BootstrapConfiguration bootstrapConfiguration; - - public BootstrapClusterRequest(BootstrapConfiguration bootstrapConfiguration) { - this.bootstrapConfiguration = bootstrapConfiguration; - } - - public BootstrapClusterRequest(StreamInput in) throws IOException { - super(in); - bootstrapConfiguration = new BootstrapConfiguration(in); - } - - /** - * @return the bootstrap configuration: the initial set of master-eligible nodes whose votes are counted in elections. - */ - public BootstrapConfiguration getBootstrapConfiguration() { - return bootstrapConfiguration; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - bootstrapConfiguration.writeTo(out); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponse.java deleted file mode 100644 index 2576409a3cef1..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponse.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Response to a {@link BootstrapClusterRequest} indicating that the cluster has been successfully bootstrapped. - */ -public class BootstrapClusterResponse extends ActionResponse { - private final boolean alreadyBootstrapped; - - public BootstrapClusterResponse(boolean alreadyBootstrapped) { - this.alreadyBootstrapped = alreadyBootstrapped; - } - - public BootstrapClusterResponse(StreamInput in) throws IOException { - super(in); - alreadyBootstrapped = in.readBoolean(); - } - - /** - * @return whether this node already knew that the cluster had been bootstrapped when handling this request. - */ - public boolean getAlreadyBootstrapped() { - return alreadyBootstrapped; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(alreadyBootstrapped); - } - - @Override - public String toString() { - return "BootstrapClusterResponse{" + - "alreadyBootstrapped=" + alreadyBootstrapped + - '}'; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfiguration.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfiguration.java deleted file mode 100644 index 822287af77465..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfiguration.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; - -public class BootstrapConfiguration implements Writeable { - - private final List nodeDescriptions; - - public BootstrapConfiguration(List nodeDescriptions) { - if (nodeDescriptions.isEmpty()) { - throw new IllegalArgumentException("cannot create empty bootstrap configuration"); - } - this.nodeDescriptions = Collections.unmodifiableList(new ArrayList<>(nodeDescriptions)); - } - - public BootstrapConfiguration(StreamInput in) throws IOException { - nodeDescriptions = Collections.unmodifiableList(in.readList(NodeDescription::new)); - assert nodeDescriptions.isEmpty() == false; - } - - public List getNodeDescriptions() { - return nodeDescriptions; - } - - public VotingConfiguration resolve(Iterable discoveredNodes) { - final Set selectedNodes = new HashSet<>(); - for (final NodeDescription nodeDescription : nodeDescriptions) { - final DiscoveryNode discoveredNode = nodeDescription.resolve(discoveredNodes); - if (selectedNodes.add(discoveredNode) == false) { - throw new ElasticsearchException("multiple nodes matching {} in {}", discoveredNode, this); - } - } - - final Set nodeIds = selectedNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); - assert nodeIds.size() == selectedNodes.size() : selectedNodes + " does not contain distinct IDs"; - return new VotingConfiguration(nodeIds); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeList(nodeDescriptions); - } - - @Override - public String toString() { - return "BootstrapConfiguration{" + - "nodeDescriptions=" + nodeDescriptions + - '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - BootstrapConfiguration that = (BootstrapConfiguration) o; - return Objects.equals(nodeDescriptions, that.nodeDescriptions); - } - - @Override - public int hashCode() { - return Objects.hash(nodeDescriptions); - } - - public static class NodeDescription implements Writeable { - - @Nullable - private final String id; - - private final String name; - - @Nullable - public String getId() { - return id; - } - - public String getName() { - return name; - } - - public NodeDescription(@Nullable String id, String name) { - this.id = id; - this.name = Objects.requireNonNull(name); - } - - public NodeDescription(DiscoveryNode discoveryNode) { - this(discoveryNode.getId(), discoveryNode.getName()); - } - - public NodeDescription(StreamInput in) throws IOException { - this(in.readOptionalString(), in.readString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(id); - out.writeString(name); - } - - @Override - public String toString() { - return "NodeDescription{" + - "id='" + id + '\'' + - ", name='" + name + '\'' + - '}'; - } - - public DiscoveryNode resolve(Iterable discoveredNodes) { - DiscoveryNode selectedNode = null; - for (final DiscoveryNode discoveredNode : discoveredNodes) { - assert discoveredNode.isMasterNode() : discoveredNode; - if (discoveredNode.getName().equals(name)) { - if (id == null || id.equals(discoveredNode.getId())) { - if (selectedNode != null) { - throw new ElasticsearchException( - "discovered multiple nodes matching {} in {}", this, discoveredNodes); - } - selectedNode = discoveredNode; - } else { - throw new ElasticsearchException("node id mismatch comparing {} to {}", this, discoveredNode); - } - } else if (id != null && id.equals(discoveredNode.getId())) { - throw new ElasticsearchException("node name mismatch comparing {} to {}", this, discoveredNode); - } - } - if (selectedNode == null) { - throw new ElasticsearchException("no node matching {} found in {}", this, discoveredNodes); - } - - return selectedNode; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - NodeDescription that = (NodeDescription) o; - return Objects.equals(id, that.id) && - Objects.equals(name, that.name); - } - - @Override - public int hashCode() { - return Objects.hash(id, name); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesAction.java deleted file mode 100644 index acaef284a5420..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesAction.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.Action; -import org.elasticsearch.common.io.stream.Writeable.Reader; - -public class GetDiscoveredNodesAction extends Action { - public static final GetDiscoveredNodesAction INSTANCE = new GetDiscoveredNodesAction(); - public static final String NAME = "cluster:admin/bootstrap/discover_nodes"; - - private GetDiscoveredNodesAction() { - super(NAME); - } - - @Override - public GetDiscoveredNodesResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public Reader getResponseReader() { - return GetDiscoveredNodesResponse::new; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequest.java deleted file mode 100644 index f91a4de5263be..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequest.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -/** - * Request the set of master-eligible nodes discovered by this node. Most useful in a brand-new cluster as a precursor to setting the - * initial configuration using {@link BootstrapClusterRequest}. - */ -public class GetDiscoveredNodesRequest extends ActionRequest { - - @Nullable // if the request should wait indefinitely - private TimeValue timeout = TimeValue.timeValueSeconds(30); - - private List requiredNodes = Collections.emptyList(); - - public GetDiscoveredNodesRequest() { - } - - public GetDiscoveredNodesRequest(StreamInput in) throws IOException { - super(in); - timeout = in.readOptionalTimeValue(); - requiredNodes = in.readList(StreamInput::readString); - } - - /** - * Sometimes it is useful to wait until enough nodes have been discovered, rather than failing immediately. This parameter controls how - * long to wait, and defaults to 30s. - * - * @param timeout how long to wait to discover sufficiently many nodes to respond successfully. - */ - public void setTimeout(@Nullable TimeValue timeout) { - if (timeout != null && timeout.compareTo(TimeValue.ZERO) < 0) { - throw new IllegalArgumentException("negative timeout of [" + timeout + "] is not allowed"); - } - this.timeout = timeout; - } - - /** - * Sometimes it is useful to wait until enough nodes have been discovered, rather than failing immediately. This parameter controls how - * long to wait, and defaults to 30s. - * - * @return how long to wait to discover sufficiently many nodes to respond successfully. - */ - @Nullable - public TimeValue getTimeout() { - return timeout; - } - - /** - * Sometimes it is useful only to receive a successful response after discovering a certain set of master-eligible nodes. - * This parameter gives the names or transport addresses of the expected nodes. - * - * @return list of expected nodes - */ - public List getRequiredNodes() { - return requiredNodes; - } - - /** - * Sometimes it is useful only to receive a successful response after discovering a certain set of master-eligible nodes. - * This parameter gives the names or transport addresses of the expected nodes. - * - * @param requiredNodes list of expected nodes - */ - public void setRequiredNodes(final List requiredNodes) { - this.requiredNodes = requiredNodes; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalTimeValue(timeout); - out.writeStringList(requiredNodes); - } - - @Override - public String toString() { - return "GetDiscoveredNodesRequest{" + - "timeout=" + timeout + - ", requiredNodes=" + requiredNodes + "}"; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponse.java deleted file mode 100644 index f697e16c03c2c..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponse.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.stream.Collectors; - -/** - * Response to {@link GetDiscoveredNodesRequest}, containing the set of master-eligible nodes that were discovered. - */ -public class GetDiscoveredNodesResponse extends ActionResponse { - private final Set nodes; - - public GetDiscoveredNodesResponse(Set nodes) { - this.nodes = Collections.unmodifiableSet(new HashSet<>(nodes)); - } - - public GetDiscoveredNodesResponse(StreamInput in) throws IOException { - super(in); - nodes = Collections.unmodifiableSet(in.readSet(DiscoveryNode::new)); - } - - /** - * @return the set of nodes that were discovered. - */ - public Set getNodes() { - return nodes; - } - - /** - * @return a bootstrap configuration constructed from the set of nodes that were discovered, in order to make a - * {@link BootstrapClusterRequest}. - */ - public BootstrapConfiguration getBootstrapConfiguration() { - return new BootstrapConfiguration(nodes.stream().map(NodeDescription::new).collect(Collectors.toList())); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeCollection(nodes); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterAction.java deleted file mode 100644 index 32a9f39cc0db8..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterAction.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.transport.TransportService; - -import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_TYPE_SETTING; - -public class TransportBootstrapClusterAction extends HandledTransportAction { - - @Nullable // TODO make this not nullable - private final Coordinator coordinator; - private final TransportService transportService; - private final String discoveryType; - - @Inject - public TransportBootstrapClusterAction(Settings settings, ActionFilters actionFilters, TransportService transportService, - Discovery discovery) { - super(BootstrapClusterAction.NAME, transportService, actionFilters, BootstrapClusterRequest::new); - this.transportService = transportService; - this.discoveryType = DISCOVERY_TYPE_SETTING.get(settings); - if (discovery instanceof Coordinator) { - coordinator = (Coordinator) discovery; - } else { - coordinator = null; - } - } - - @Override - protected void doExecute(Task task, BootstrapClusterRequest request, ActionListener listener) { - if (coordinator == null) { // TODO remove when not nullable - throw new IllegalArgumentException("cluster bootstrapping is not supported by discovery type [" + discoveryType + "]"); - } - - final DiscoveryNode localNode = transportService.getLocalNode(); - assert localNode != null; - if (localNode.isMasterNode() == false) { - throw new IllegalArgumentException( - "this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node"); - } - - transportService.getThreadPool().generic().execute(new AbstractRunnable() { - @Override - public void doRun() { - listener.onResponse(new BootstrapClusterResponse( - coordinator.setInitialConfiguration(request.getBootstrapConfiguration()) == false)); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - - @Override - public String toString() { - return "setting initial configuration with " + request; - } - }); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesAction.java deleted file mode 100644 index 6f6336c3bd5f3..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesAction.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.ListenableFuture; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportService; - -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - -import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_TYPE_SETTING; - -public class TransportGetDiscoveredNodesAction extends HandledTransportAction { - - @Nullable // TODO make this not nullable - private final Coordinator coordinator; - private final TransportService transportService; - private final String discoveryType; - - @Inject - public TransportGetDiscoveredNodesAction(Settings settings, ActionFilters actionFilters, TransportService transportService, - Discovery discovery) { - super(GetDiscoveredNodesAction.NAME, transportService, actionFilters, - (Reader) GetDiscoveredNodesRequest::new); - - this.discoveryType = DISCOVERY_TYPE_SETTING.get(settings); - this.transportService = transportService; - if (discovery instanceof Coordinator) { - coordinator = (Coordinator) discovery; - } else { - coordinator = null; - } - } - - @Override - protected void doExecute(Task task, GetDiscoveredNodesRequest request, ActionListener listener) { - if (coordinator == null) { // TODO remove when not nullable - throw new IllegalArgumentException("discovered nodes are not exposed by discovery type [" + discoveryType + "]"); - } - - final DiscoveryNode localNode = transportService.getLocalNode(); - assert localNode != null; - if (localNode.isMasterNode() == false) { - throw new IllegalArgumentException( - "this node is not master-eligible, but discovered nodes are only exposed by master-eligible nodes"); - } - final ExecutorService directExecutor = EsExecutors.newDirectExecutorService(); - final AtomicBoolean listenerNotified = new AtomicBoolean(); - final ListenableFuture listenableFuture = new ListenableFuture<>(); - final ThreadPool threadPool = transportService.getThreadPool(); - listenableFuture.addListener(listener, directExecutor, threadPool.getThreadContext()); - // TODO make it so that listenableFuture copes with multiple completions, and then remove listenerNotified - - final ActionListener> respondIfRequestSatisfied = new ActionListener>() { - @Override - public void onResponse(Iterable nodes) { - final Set nodesSet = new LinkedHashSet<>(); - nodesSet.add(localNode); - nodes.forEach(nodesSet::add); - logger.trace("discovered {}", nodesSet); - try { - if (checkWaitRequirements(request, nodesSet)) { - final GetDiscoveredNodesResponse response = new GetDiscoveredNodesResponse(nodesSet); - if (listenerNotified.compareAndSet(false, true)) { - listenableFuture.onResponse(response); - } - } - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - if (listenerNotified.compareAndSet(false, true)) { - listenableFuture.onFailure(e); - } - } - - @Override - public String toString() { - return "waiting for " + request; - } - }; - - final Releasable releasable = coordinator.withDiscoveryListener(respondIfRequestSatisfied); - listenableFuture.addListener(ActionListener.wrap(releasable::close), directExecutor, threadPool.getThreadContext()); - - if (coordinator.isInitialConfigurationSet()) { - respondIfRequestSatisfied.onFailure(new ClusterAlreadyBootstrappedException()); - } else { - respondIfRequestSatisfied.onResponse(coordinator.getFoundPeers()); - } - - if (request.getTimeout() != null) { - threadPool.schedule(request.getTimeout(), Names.SAME, new Runnable() { - @Override - public void run() { - respondIfRequestSatisfied.onFailure(new ElasticsearchTimeoutException("timed out while waiting for " + request)); - } - - @Override - public String toString() { - return "timeout handler for " + request; - } - }); - } - } - - private static boolean matchesRequirement(DiscoveryNode discoveryNode, String requirement) { - return discoveryNode.getName().equals(requirement) - || discoveryNode.getAddress().toString().equals(requirement) - || discoveryNode.getAddress().getAddress().equals(requirement); - } - - private static boolean checkWaitRequirements(GetDiscoveredNodesRequest request, Set nodes) { - List requirements = request.getRequiredNodes(); - final Set selectedNodes = new HashSet<>(); - for (final String requirement : requirements) { - final Set matchingNodes - = nodes.stream().filter(n -> matchesRequirement(n, requirement)).collect(Collectors.toSet()); - - if (matchingNodes.isEmpty()) { - return false; - } - if (matchingNodes.size() > 1) { - throw new IllegalArgumentException("[" + requirement + "] matches " + matchingNodes); - } - - for (final DiscoveryNode matchingNode : matchingNodes) { - if (selectedNodes.add(matchingNode) == false) { - throw new IllegalArgumentException("[" + matchingNode + "] matches " + - requirements.stream().filter(r -> matchesRequirement(matchingNode, requirement)).collect(Collectors.toList())); - } - } - } - - return true; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index 57407bd61fb82..c8889c86c1df7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -29,15 +29,11 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.Map; public class ClusterSearchShardsResponse extends ActionResponse implements ToXContentObject { - public static final ClusterSearchShardsResponse EMPTY = new ClusterSearchShardsResponse(new ClusterSearchShardsGroup[0], - new DiscoveryNode[0], Collections.emptyMap()); - private final ClusterSearchShardsGroup[] groups; private final DiscoveryNode[] nodes; private final Map indicesAndFilters; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index f4fa0176cb9e5..9fc35dc7be38a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -155,6 +155,7 @@ private void buildResponse(final ClusterStateRequest request, if (request.metaData()) { if (request.indices().length > 0) { + mdBuilder.version(currentState.metaData().version()); String[] indices = indexNameExpressionResolver.concreteIndexNames(currentState, request); for (String filteredIndex : indices) { IndexMetaData indexMetaData = currentState.metaData().index(filteredIndex); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java index 819d2de999ccf..dde7c13e2c01a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java @@ -29,7 +29,12 @@ import java.io.IOException; -/** Request the mappings of specific fields */ +/** + * Request the mappings of specific fields + * + * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. + * Any changes done to this class should go to that client class as well. + */ public class GetFieldMappingsRequest extends ActionRequest implements IndicesRequest.Replaceable { protected boolean local = false; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index f24c75d988e88..d3200bc1e1d9a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -50,7 +50,12 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.rest.BaseRestHandler.DEFAULT_INCLUDE_TYPE_NAME_POLICY; -/** Response object for {@link GetFieldMappingsRequest} API */ +/** + * Response object for {@link GetFieldMappingsRequest} API + * + * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. + * Any changes done to this class should go to that client class as well. + */ public class GetFieldMappingsResponse extends ActionResponse implements ToXContentObject { private static final ParseField MAPPINGS = new ParseField("mappings"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 926ae175d65ad..6868708145359 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -254,7 +254,7 @@ public static XContentBuilder buildFromSimplifiedDef(String type, Object... sour * The mapping source definition. */ public PutMappingRequest source(XContentBuilder mappingBuilder) { - return source(Strings.toString(mappingBuilder), mappingBuilder.contentType()); + return source(BytesReference.bytes(mappingBuilder), mappingBuilder.contentType()); } /** @@ -264,7 +264,7 @@ public PutMappingRequest source(Map mappingSource) { try { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); builder.map(mappingSource); - return source(Strings.toString(builder), XContentType.JSON); + return source(BytesReference.bytes(builder), builder.contentType()); } catch (IOException e) { throw new ElasticsearchGenerationException("Failed to generate [" + mappingSource + "]", e); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index ce82d277dbbd7..0ff5785fcd358 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -461,7 +461,7 @@ public void readFrom(StreamInput in) throws IOException { name = in.readString(); if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - indexPatterns = in.readList(StreamInput::readString); + indexPatterns = in.readStringList(); } else { indexPatterns = Collections.singletonList(in.readString()); } @@ -495,7 +495,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(cause); out.writeString(name); if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - out.writeStringList(indexPatterns); + out.writeStringCollection(indexPatterns); } else { out.writeString(indexPatterns.size() > 0 ? indexPatterns.get(0) : ""); } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index f031dfa581064..d6abbf73e8864 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.Nullable; @@ -43,7 +44,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.stream.Collectors; @@ -70,7 +70,7 @@ abstract class AbstractSearchAsyncAction exten private final Object shardFailuresMutex = new Object(); private final AtomicInteger successfulOps = new AtomicInteger(); private final AtomicInteger skippedOps = new AtomicInteger(); - private final TransportSearchAction.SearchTimeProvider timeProvider; + private final SearchTimeProvider timeProvider; private final SearchResponse.Clusters clusters; AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService, @@ -79,7 +79,7 @@ abstract class AbstractSearchAsyncAction exten Map> indexRoutings, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, - TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, + SearchTimeProvider timeProvider, long clusterStateVersion, SearchTask task, SearchPhaseResults resultConsumer, int maxConcurrentRequestsPerNode, SearchResponse.Clusters clusters) { super(name, request, shardsIts, logger, maxConcurrentRequestsPerNode, executor); @@ -103,8 +103,7 @@ abstract class AbstractSearchAsyncAction exten * Builds how long it took to execute the search. */ long buildTookInMillis() { - return TimeUnit.NANOSECONDS.toMillis( - timeProvider.getRelativeCurrentNanos() - timeProvider.getRelativeStartNanos()); + return timeProvider.buildTookInMillis(); } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index da481b7a4a8ee..10a85b723166c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -153,6 +153,7 @@ private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder optio groupSource.explain(options.isExplain()); groupSource.trackScores(options.isTrackScores()); groupSource.version(options.isVersion()); + groupSource.seqNoAndPrimaryTerm(options.isSeqNoAndPrimaryTerm()); if (innerCollapseBuilder != null) { groupSource.collapse(innerCollapseBuilder); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 418d95b2077a9..027d9d5f10c25 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -170,7 +170,7 @@ static SortedTopDocs sortDocs(boolean ignoreFrom, Collection 0) { setShardIndex(td.topDocs, queryResult.getShardIndex()); @@ -439,12 +439,10 @@ private ReducedQueryPhase reducedQueryPhase(Collection= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases; numReducePhases++; // increment for this phase - boolean timedOut = false; - Boolean terminatedEarly = null; if (queryResults.isEmpty()) { // early terminate we have nothing to reduce final TotalHits totalHits = topDocsStats.getTotalHits(); - return new ReducedQueryPhase(totalHits, topDocsStats.fetchHits, topDocsStats.maxScore, - timedOut, terminatedEarly, null, null, null, SortedTopDocs.EMPTY, null, numReducePhases, 0, 0, true); + return new ReducedQueryPhase(totalHits, topDocsStats.fetchHits, topDocsStats.getMaxScore(), + false, null, null, null, null, SortedTopDocs.EMPTY, null, numReducePhases, 0, 0, true); } final QuerySearchResult firstResult = queryResults.stream().findFirst().get().queryResult(); final boolean hasSuggest = firstResult.suggest() != null; @@ -476,16 +474,6 @@ private ReducedQueryPhase reducedQueryPhase(Collection> suggestion : result.suggest()) { @@ -508,8 +496,8 @@ private ReducedQueryPhase reducedQueryPhase(CollectiontrackTotalHitsUpTo * so we return this lower bound when the total hits is greater than this value. * This can happen when multiple shards are merged since the limit to track total hits @@ -777,7 +763,7 @@ TotalHits getTotalHits() { } } - void add(TopDocsAndMaxScore topDocs) { + void add(TopDocsAndMaxScore topDocs, boolean timedOut, Boolean terminatedEarly) { if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { totalHits += topDocs.topDocs.totalHits.value; if (topDocs.topDocs.totalHits.relation == Relation.GREATER_THAN_OR_EQUAL_TO) { @@ -788,6 +774,16 @@ void add(TopDocsAndMaxScore topDocs) { if (!Float.isNaN(topDocs.maxScore)) { maxScore = Math.max(maxScore, topDocs.maxScore); } + if (timedOut) { + this.timedOut = true; + } + if (terminatedEarly != null) { + if (this.terminatedEarly == null) { + this.terminatedEarly = terminatedEarly; + } else if (terminatedEarly) { + this.terminatedEarly = true; + } + } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java new file mode 100644 index 0000000000000..b146d42c0d2e6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -0,0 +1,297 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.profile.ProfileShardResult; +import org.elasticsearch.search.profile.SearchProfileShardResults; +import org.elasticsearch.search.suggest.Suggest; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Function; + +import static org.elasticsearch.action.search.SearchPhaseController.TopDocsStats; +import static org.elasticsearch.action.search.SearchPhaseController.mergeTopDocs; +import static org.elasticsearch.action.search.SearchResponse.Clusters; + +/** + * Merges multiple search responses into one. Used in cross-cluster search when reduction is performed locally on each cluster. + * The CCS coordinating node sends one search request per remote cluster involved and gets one search response back from each one of them. + * Such responses contain all the info to be able to perform an additional reduction and return results back to the user. + * Preconditions are that only non final reduction has been performed on each cluster, meaning that buckets have not been pruned locally + * and pipeline aggregations have not yet been executed. Also, from+size search hits need to be requested to each cluster and such results + * have all already been fetched downstream. + * This approach consists of a different trade-off compared to ordinary cross-cluster search where we fan out to all the shards, no matter + * whether they belong to the local or the remote cluster. Assuming that there commonly is network latency when communicating with remote + * clusters, limiting the number of requests to one per cluster is beneficial, and outweighs the downside of fetching many more hits than + * needed downstream and returning bigger responses to the coordinating node. + * Known limitations: + * - scroll requests are not supported + * - field collapsing is supported, but whenever inner_hits are requested, they will be retrieved by each cluster locally after the fetch + * phase, through the {@link ExpandSearchPhase}. Such inner_hits are not merged together as part of hits reduction. + */ +//TODO it may make sense to integrate the remote clusters responses as a shard response in the initial search phase and ignore hits coming +//from the remote clusters in the fetch phase. This would be identical to the removed QueryAndFetch strategy except that only the remote +//cluster response would have the fetch results. +final class SearchResponseMerger { + private final int from; + private final int size; + private final int trackTotalHitsUpTo; + private final SearchTimeProvider searchTimeProvider; + private final Function reduceContextFunction; + private final List searchResponses = new CopyOnWriteArrayList<>(); + + SearchResponseMerger(int from, int size, int trackTotalHitsUpTo, SearchTimeProvider searchTimeProvider, + Function reduceContextFunction) { + this.from = from; + this.size = size; + this.trackTotalHitsUpTo = trackTotalHitsUpTo; + this.searchTimeProvider = Objects.requireNonNull(searchTimeProvider); + this.reduceContextFunction = Objects.requireNonNull(reduceContextFunction); + } + + /** + * Add a search response to the list of responses to be merged together into one. + * Merges currently happen at once when all responses are available and {@link #getMergedResponse(Clusters)} )} is called. + * That may change in the future as it's possible to introduce incremental merges as responses come in if necessary. + */ + void add(SearchResponse searchResponse) { + searchResponses.add(searchResponse); + } + + /** + * Returns the merged response. To be called once all responses have been added through {@link #add(SearchResponse)} + * so that all responses are merged into a single one. + */ + SearchResponse getMergedResponse(Clusters clusters) { + assert searchResponses.size() > 1; + int totalShards = 0; + int skippedShards = 0; + int successfulShards = 0; + //the current reduce phase counts as one + int numReducePhases = 1; + List failures = new ArrayList<>(); + Map profileResults = new HashMap<>(); + List aggs = new ArrayList<>(); + Map shards = new TreeMap<>(); + List topDocsList = new ArrayList<>(searchResponses.size()); + Map> groupedSuggestions = new HashMap<>(); + Boolean trackTotalHits = null; + + TopDocsStats topDocsStats = new TopDocsStats(trackTotalHitsUpTo); + + for (SearchResponse searchResponse : searchResponses) { + totalShards += searchResponse.getTotalShards(); + skippedShards += searchResponse.getSkippedShards(); + successfulShards += searchResponse.getSuccessfulShards(); + numReducePhases += searchResponse.getNumReducePhases(); + + Collections.addAll(failures, searchResponse.getShardFailures()); + + profileResults.putAll(searchResponse.getProfileResults()); + + if (searchResponse.getAggregations() != null) { + InternalAggregations internalAggs = (InternalAggregations) searchResponse.getAggregations(); + aggs.add(internalAggs); + } + + Suggest suggest = searchResponse.getSuggest(); + if (suggest != null) { + for (Suggest.Suggestion> entries : suggest) { + List suggestionList = groupedSuggestions.computeIfAbsent(entries.getName(), s -> new ArrayList<>()); + suggestionList.add(entries); + } + } + + SearchHits searchHits = searchResponse.getHits(); + + final TotalHits totalHits; + if (searchHits.getTotalHits() == null) { + //in case we didn't track total hits, we get null from each cluster, but we need to set 0 eq to the TopDocs + totalHits = new TotalHits(0, TotalHits.Relation.EQUAL_TO); + assert trackTotalHits == null || trackTotalHits == false; + trackTotalHits = false; + } else { + totalHits = searchHits.getTotalHits(); + assert trackTotalHits == null || trackTotalHits; + trackTotalHits = true; + } + TopDocs topDocs = searchHitsToTopDocs(searchHits, totalHits, shards); + topDocsStats.add(new TopDocsAndMaxScore(topDocs, searchHits.getMaxScore()), + searchResponse.isTimedOut(), searchResponse.isTerminatedEarly()); + topDocsList.add(topDocs); + } + + //after going through all the hits and collecting all their distinct shards, we can assign shardIndex and set it to the ScoreDocs + setShardIndex(shards, topDocsList); + TopDocs topDocs = mergeTopDocs(topDocsList, size, from); + SearchHits mergedSearchHits = topDocsToSearchHits(topDocs, topDocsStats); + Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions)); + InternalAggregations reducedAggs = InternalAggregations.reduce(aggs, reduceContextFunction.apply(true)); + ShardSearchFailure[] shardFailures = failures.toArray(ShardSearchFailure.EMPTY_ARRAY); + //make failures ordering consistent with ordinary search and CCS + Arrays.sort(shardFailures, FAILURES_COMPARATOR); + InternalSearchResponse response = new InternalSearchResponse(mergedSearchHits, reducedAggs, suggest, + new SearchProfileShardResults(profileResults), topDocsStats.timedOut, topDocsStats.terminatedEarly, numReducePhases); + long tookInMillis = searchTimeProvider.buildTookInMillis(); + return new SearchResponse(response, null, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters); + } + + private static final Comparator FAILURES_COMPARATOR = new Comparator() { + @Override + public int compare(ShardSearchFailure o1, ShardSearchFailure o2) { + ShardId shardId1 = extractShardId(o1); + ShardId shardId2 = extractShardId(o2); + if (shardId1 == null && shardId2 == null) { + return 0; + } + if (shardId1 == null) { + return -1; + } + if (shardId2 == null) { + return 1; + } + return shardId1.compareTo(shardId2); + } + + private ShardId extractShardId(ShardSearchFailure failure) { + SearchShardTarget shard = failure.shard(); + if (shard != null) { + return shard.getShardId(); + } + Throwable cause = failure.getCause(); + if (cause instanceof ElasticsearchException) { + ElasticsearchException e = (ElasticsearchException) cause; + return e.getShardId(); + } + return null; + } + }; + + private static TopDocs searchHitsToTopDocs(SearchHits searchHits, TotalHits totalHits, Map shards) { + SearchHit[] hits = searchHits.getHits(); + ScoreDoc[] scoreDocs = new ScoreDoc[hits.length]; + final TopDocs topDocs; + if (searchHits.getSortFields() != null) { + if (searchHits.getCollapseField() != null) { + assert searchHits.getCollapseValues() != null; + topDocs = new CollapseTopFieldDocs(searchHits.getCollapseField(), totalHits, scoreDocs, + searchHits.getSortFields(), searchHits.getCollapseValues()); + } else { + topDocs = new TopFieldDocs(totalHits, scoreDocs, searchHits.getSortFields()); + } + } else { + topDocs = new TopDocs(totalHits, scoreDocs); + } + + for (int i = 0; i < hits.length; i++) { + SearchHit hit = hits[i]; + ShardId shardId = hit.getShard().getShardId(); + shards.putIfAbsent(shardId, null); + final SortField[] sortFields = searchHits.getSortFields(); + final Object[] sortValues; + if (sortFields == null) { + sortValues = null; + } else { + if (sortFields.length == 1 && sortFields[0].getType() == SortField.Type.SCORE) { + sortValues = new Object[]{hit.getScore()}; + } else { + sortValues = hit.getRawSortValues(); + } + } + scoreDocs[i] = new FieldDocAndSearchHit(hit.docId(), hit.getScore(), sortValues, hit); + } + return topDocs; + } + + private static void setShardIndex(Map shards, List topDocsList) { + int shardIndex = 0; + for (Map.Entry shard : shards.entrySet()) { + shard.setValue(shardIndex++); + } + //and go through all the scoreDocs from each cluster and set their corresponding shardIndex + for (TopDocs topDocs : topDocsList) { + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + FieldDocAndSearchHit fieldDocAndSearchHit = (FieldDocAndSearchHit) scoreDoc; + //When hits come from the indices with same names on multiple clusters and same shard identifier, we rely on such indices + //to have a different uuid across multiple clusters. That's how they will get a different shardIndex. + ShardId shardId = fieldDocAndSearchHit.searchHit.getShard().getShardId(); + fieldDocAndSearchHit.shardIndex = shards.get(shardId); + } + } + } + + private static SearchHits topDocsToSearchHits(TopDocs topDocs, TopDocsStats topDocsStats) { + SearchHit[] searchHits = new SearchHit[topDocs.scoreDocs.length]; + for (int i = 0; i < topDocs.scoreDocs.length; i++) { + FieldDocAndSearchHit scoreDoc = (FieldDocAndSearchHit)topDocs.scoreDocs[i]; + searchHits[i] = scoreDoc.searchHit; + } + + SortField[] sortFields = null; + String collapseField = null; + Object[] collapseValues = null; + if (topDocs instanceof TopFieldDocs) { + sortFields = ((TopFieldDocs)topDocs).fields; + if (topDocs instanceof CollapseTopFieldDocs) { + CollapseTopFieldDocs collapseTopFieldDocs = (CollapseTopFieldDocs)topDocs; + collapseField = collapseTopFieldDocs.field; + collapseValues = collapseTopFieldDocs.collapseValues; + } + } + return new SearchHits(searchHits, topDocsStats.getTotalHits(), topDocsStats.getMaxScore(), + sortFields, collapseField, collapseValues); + } + + private static final class FieldDocAndSearchHit extends FieldDoc { + private final SearchHit searchHit; + + //to simplify things, we use a FieldDoc all the time, even when only a ScoreDoc is needed, in which case fields are null. + FieldDocAndSearchHit(int doc, float score, Object[] fields, SearchHit searchHit) { + super(doc, score, fields); + this.searchHit = searchHit; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 88e2764982cb4..30e030eca7376 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -22,10 +22,12 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -39,6 +41,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; @@ -50,6 +53,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; @@ -60,7 +64,11 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.LongSupplier; @@ -140,7 +148,7 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * to moving backwards due to NTP and other such complexities, etc.). There are also issues with * using a relative clock for reporting real time. Thus, we simply separate these two uses. */ - static class SearchTimeProvider { + static final class SearchTimeProvider { private final long absoluteStartMillis; private final long relativeStartNanos; @@ -170,12 +178,8 @@ long getAbsoluteStartMillis() { return absoluteStartMillis; } - long getRelativeStartNanos() { - return relativeStartNanos; - } - - long getRelativeCurrentNanos() { - return relativeCurrentNanosProvider.getAsLong(); + long buildTookInMillis() { + return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos); } } @@ -198,17 +202,23 @@ protected void doExecute(Task task, SearchRequest searchRequest, ActionListener< executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(), (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener, SearchResponse.Clusters.EMPTY); } else { - remoteClusterService.collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(), - searchRequest.routing(), remoteClusterIndices, ActionListener.wrap((searchShardsResponses) -> { - List remoteShardIterators = new ArrayList<>(); - Map remoteAliasFilters = new HashMap<>(); - BiFunction clusterNodeLookup = processRemoteShards(searchShardsResponses, - remoteClusterIndices, remoteShardIterators, remoteAliasFilters); - SearchResponse.Clusters clusters = buildClusters(localIndices, remoteClusterIndices, searchShardsResponses); - executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, - remoteShardIterators, clusterNodeLookup, clusterState, remoteAliasFilters, listener, - clusters); - }, listener::onFailure)); + AtomicInteger skippedClusters = new AtomicInteger(0); + collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(), searchRequest.routing(), skippedClusters, + remoteClusterIndices, remoteClusterService, threadPool, + ActionListener.wrap( + searchShardsResponses -> { + List remoteShardIterators = new ArrayList<>(); + Map remoteAliasFilters = new HashMap<>(); + BiFunction clusterNodeLookup = processRemoteShards( + searchShardsResponses, remoteClusterIndices, remoteShardIterators, remoteAliasFilters); + int localClusters = localIndices == null ? 0 : 1; + int totalClusters = remoteClusterIndices.size() + localClusters; + int successfulClusters = searchShardsResponses.size() + localClusters; + executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, + remoteShardIterators, clusterNodeLookup, clusterState, remoteAliasFilters, listener, + new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get())); + }, + listener::onFailure)); } }, listener::onFailure); if (searchRequest.source() == null) { @@ -219,18 +229,56 @@ protected void doExecute(Task task, SearchRequest searchRequest, ActionListener< } } - static SearchResponse.Clusters buildClusters(OriginalIndices localIndices, Map remoteIndices, - Map searchShardsResponses) { - int localClusters = localIndices == null ? 0 : 1; - int totalClusters = remoteIndices.size() + localClusters; - int successfulClusters = localClusters; - for (ClusterSearchShardsResponse searchShardsResponse : searchShardsResponses.values()) { - if (searchShardsResponse != ClusterSearchShardsResponse.EMPTY) { - successfulClusters++; - } + static void collectSearchShards(IndicesOptions indicesOptions, String preference, String routing, AtomicInteger skippedClusters, + Map remoteIndicesByCluster, RemoteClusterService remoteClusterService, + ThreadPool threadPool, ActionListener> listener) { + final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); + final Map searchShardsResponses = new ConcurrentHashMap<>(); + final AtomicReference transportException = new AtomicReference<>(); + for (Map.Entry entry : remoteIndicesByCluster.entrySet()) { + final String clusterAlias = entry.getKey(); + boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); + Client clusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); + final String[] indices = entry.getValue().indices(); + ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices) + .indicesOptions(indicesOptions).local(true).preference(preference).routing(routing); + clusterClient.admin().cluster().searchShards(searchShardsRequest, new ActionListener() { + @Override + public void onResponse(ClusterSearchShardsResponse response) { + searchShardsResponses.put(clusterAlias, response); + maybeFinish(); + } + + @Override + public void onFailure(Exception e) { + if (skipUnavailable) { + skippedClusters.incrementAndGet(); + } else { + RemoteTransportException exception = + new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); + if (transportException.compareAndSet(null, exception) == false) { + transportException.accumulateAndGet(exception, (previous, current) -> { + current.addSuppressed(previous); + return current; + }); + } + } + maybeFinish(); + } + + private void maybeFinish() { + if (responsesCountDown.countDown()) { + RemoteTransportException exception = transportException.get(); + if (exception == null) { + listener.onResponse(searchShardsResponses); + } else { + listener.onFailure(transportException.get()); + } + } + } + } + ); } - int skippedClusters = totalClusters - successfulClusters; - return new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters); } static BiFunction processRemoteShards(Map searchShardsResponses, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java index fc3f4493104fc..cdbf6b6691077 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java @@ -21,56 +21,73 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterAction; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterResponse; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesAction; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesResponse; +import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; -import java.io.IOException; -import java.util.Collections; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; public class ClusterBootstrapService { - private static final Logger logger = LogManager.getLogger(ClusterBootstrapService.class); - public static final Setting> INITIAL_MASTER_NODES_SETTING = - Setting.listSetting("cluster.initial_master_nodes", Collections.emptyList(), Function.identity(), Property.NodeScope); + Setting.listSetting("cluster.initial_master_nodes", emptyList(), Function.identity(), Property.NodeScope); public static final Setting UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING = Setting.timeSetting("discovery.unconfigured_bootstrap_timeout", TimeValue.timeValueSeconds(3), TimeValue.timeValueMillis(1), Property.NodeScope); - private final List initialMasterNodes; - @Nullable + static final String BOOTSTRAP_PLACEHOLDER_PREFIX = "{bootstrap-placeholder}-"; + + private static final Logger logger = LogManager.getLogger(ClusterBootstrapService.class); + private final Set bootstrapRequirements; + @Nullable // null if discoveryIsConfigured() private final TimeValue unconfiguredBootstrapTimeout; private final TransportService transportService; - private volatile boolean running; + private final Supplier> discoveredNodesSupplier; + private final BooleanSupplier isBootstrappedSupplier; + private final Consumer votingConfigurationConsumer; + private final AtomicBoolean bootstrappingPermitted = new AtomicBoolean(true); + + public ClusterBootstrapService(Settings settings, TransportService transportService, + Supplier> discoveredNodesSupplier, BooleanSupplier isBootstrappedSupplier, + Consumer votingConfigurationConsumer) { + + final List initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); + bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); + if (bootstrapRequirements.size() != initialMasterNodes.size()) { + throw new IllegalArgumentException( + "setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] contains duplicates: " + initialMasterNodes); + } - public ClusterBootstrapService(Settings settings, TransportService transportService) { - initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); this.transportService = transportService; + this.discoveredNodesSupplier = discoveredNodesSupplier; + this.isBootstrappedSupplier = isBootstrappedSupplier; + this.votingConfigurationConsumer = votingConfigurationConsumer; } public static boolean discoveryIsConfigured(Settings settings) { @@ -78,157 +95,142 @@ public static boolean discoveryIsConfigured(Settings settings) { .anyMatch(s -> s.exists(settings)); } - public void start() { - assert running == false; - running = true; + void onFoundPeersUpdated() { + final Set nodes = getDiscoveredNodes(); + if (bootstrappingPermitted.get() && transportService.getLocalNode().isMasterNode() && bootstrapRequirements.isEmpty() == false + && isBootstrappedSupplier.getAsBoolean() == false && nodes.stream().noneMatch(Coordinator::isZen1Node)) { + + final Tuple,List> requirementMatchingResult; + try { + requirementMatchingResult = checkRequirements(nodes); + } catch (IllegalStateException e) { + logger.warn("bootstrapping cancelled", e); + bootstrappingPermitted.set(false); + return; + } + + final Set nodesMatchingRequirements = requirementMatchingResult.v1(); + final List unsatisfiedRequirements = requirementMatchingResult.v2(); + logger.trace("nodesMatchingRequirements={}, unsatisfiedRequirements={}, bootstrapRequirements={}", + nodesMatchingRequirements, unsatisfiedRequirements, bootstrapRequirements); + + if (nodesMatchingRequirements.contains(transportService.getLocalNode()) == false) { + logger.info("skipping cluster bootstrapping as local node does not match bootstrap requirements: {}", + bootstrapRequirements); + bootstrappingPermitted.set(false); + return; + } + + if (nodesMatchingRequirements.size() * 2 > bootstrapRequirements.size()) { + startBootstrap(nodesMatchingRequirements, unsatisfiedRequirements); + } + } + } + + void scheduleUnconfiguredBootstrap() { + if (unconfiguredBootstrapTimeout == null) { + return; + } if (transportService.getLocalNode().isMasterNode() == false) { return; } - if (unconfiguredBootstrapTimeout != null) { - logger.info("no discovery configuration found, will perform best-effort cluster bootstrapping after [{}] " + - "unless existing master is discovered", unconfiguredBootstrapTimeout); - final ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + logger.info("no discovery configuration found, will perform best-effort cluster bootstrapping after [{}] " + + "unless existing master is discovered", unconfiguredBootstrapTimeout); + + transportService.getThreadPool().scheduleUnlessShuttingDown(unconfiguredBootstrapTimeout, Names.GENERIC, new Runnable() { + @Override + public void run() { + final Set discoveredNodes = getDiscoveredNodes(); + final List zen1Nodes = discoveredNodes.stream().filter(Coordinator::isZen1Node).collect(Collectors.toList()); + if (zen1Nodes.isEmpty()) { + logger.debug("performing best-effort cluster bootstrapping with {}", discoveredNodes); + startBootstrap(discoveredNodes, emptyList()); + } else { + logger.info("avoiding best-effort cluster bootstrapping due to discovery of pre-7.0 nodes {}", zen1Nodes); + } + } + + @Override + public String toString() { + return "unconfigured-discovery delayed bootstrap"; + } + }); + } + + private Set getDiscoveredNodes() { + return Stream.concat(Stream.of(transportService.getLocalNode()), + StreamSupport.stream(discoveredNodesSupplier.get().spliterator(), false)).collect(Collectors.toSet()); + } + + private void startBootstrap(Set discoveryNodes, List unsatisfiedRequirements) { + assert discoveryNodes.stream().allMatch(DiscoveryNode::isMasterNode) : discoveryNodes; + assert discoveryNodes.stream().noneMatch(Coordinator::isZen1Node) : discoveryNodes; + assert unsatisfiedRequirements.size() < discoveryNodes.size() : discoveryNodes + " smaller than " + unsatisfiedRequirements; + if (bootstrappingPermitted.compareAndSet(true, false)) { + doBootstrap(new VotingConfiguration(Stream.concat(discoveryNodes.stream().map(DiscoveryNode::getId), + unsatisfiedRequirements.stream().map(s -> BOOTSTRAP_PLACEHOLDER_PREFIX + s)) + .collect(Collectors.toSet()))); + } + } + + public static boolean isBootstrapPlaceholder(String nodeId) { + return nodeId.startsWith(BOOTSTRAP_PLACEHOLDER_PREFIX); + } + + private void doBootstrap(VotingConfiguration votingConfiguration) { + assert transportService.getLocalNode().isMasterNode(); - transportService.getThreadPool().scheduleUnlessShuttingDown(unconfiguredBootstrapTimeout, Names.SAME, new Runnable() { + try { + votingConfigurationConsumer.accept(votingConfiguration); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("exception when bootstrapping with {}, rescheduling", votingConfiguration), e); + transportService.getThreadPool().scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(10), Names.GENERIC, + new Runnable() { @Override public void run() { - // TODO: remove the following line once schedule method properly preserves thread context - threadContext.markAsSystemContext(); - final GetDiscoveredNodesRequest request = new GetDiscoveredNodesRequest(); - logger.trace("sending {}", request); - transportService.sendRequest(transportService.getLocalNode(), GetDiscoveredNodesAction.NAME, request, - new TransportResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - logger.debug("discovered {}, starting to bootstrap", response.getNodes()); - awaitBootstrap(response.getBootstrapConfiguration()); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - if (rootCause instanceof ClusterAlreadyBootstrappedException) { - logger.debug(rootCause.getMessage(), rootCause); - } else { - logger.warn("discovery attempt failed", exp); - } - } - - @Override - public String executor() { - return Names.SAME; - } - - @Override - public GetDiscoveredNodesResponse read(StreamInput in) throws IOException { - return new GetDiscoveredNodesResponse(in); - } - }); + doBootstrap(votingConfiguration); } @Override public String toString() { - return "unconfigured-discovery delayed bootstrap"; + return "retry of failed bootstrapping with " + votingConfiguration; } - }); - - } - } else if (initialMasterNodes.isEmpty() == false) { - logger.debug("waiting for discovery of master-eligible nodes matching {}", initialMasterNodes); - - final ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); - - final GetDiscoveredNodesRequest request = new GetDiscoveredNodesRequest(); - request.setRequiredNodes(initialMasterNodes); - request.setTimeout(null); - logger.trace("sending {}", request); - transportService.sendRequest(transportService.getLocalNode(), GetDiscoveredNodesAction.NAME, request, - new TransportResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - assert response.getNodes().stream().allMatch(DiscoveryNode::isMasterNode); - logger.debug("discovered {}, starting to bootstrap", response.getNodes()); - awaitBootstrap(response.getBootstrapConfiguration()); - } - - @Override - public void handleException(TransportException exp) { - logger.warn("discovery attempt failed", exp); - } - - @Override - public String executor() { - return Names.SAME; - } - - @Override - public GetDiscoveredNodesResponse read(StreamInput in) throws IOException { - return new GetDiscoveredNodesResponse(in); - } - }); - } + } + ); } } - public void stop() { - running = false; + private static boolean matchesRequirement(DiscoveryNode discoveryNode, String requirement) { + return discoveryNode.getName().equals(requirement) + || discoveryNode.getAddress().toString().equals(requirement) + || discoveryNode.getAddress().getAddress().equals(requirement); } - private void awaitBootstrap(final BootstrapConfiguration bootstrapConfiguration) { - if (running == false) { - logger.debug("awaitBootstrap: not running"); - return; - } + private Tuple,List> checkRequirements(Set nodes) { + final Set selectedNodes = new HashSet<>(); + final List unmatchedRequirements = new ArrayList<>(); + for (final String bootstrapRequirement : bootstrapRequirements) { + final Set matchingNodes + = nodes.stream().filter(n -> matchesRequirement(n, bootstrapRequirement)).collect(Collectors.toSet()); - BootstrapClusterRequest request = new BootstrapClusterRequest(bootstrapConfiguration); - logger.trace("sending {}", request); - transportService.sendRequest(transportService.getLocalNode(), BootstrapClusterAction.NAME, request, - new TransportResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - logger.debug("automatic cluster bootstrapping successful: received {}", response); - } + if (matchingNodes.size() == 0) { + unmatchedRequirements.add(bootstrapRequirement); + } - @Override - public void handleException(TransportException exp) { - // log a warning since a failure here indicates a bad problem, such as: - // - bootstrap configuration resolution failed (e.g. discovered nodes no longer match those in the bootstrap config) - // - discovered nodes no longer form a quorum in the bootstrap config - logger.warn(new ParameterizedMessage("automatic cluster bootstrapping failed, retrying [{}]", - bootstrapConfiguration.getNodeDescriptions()), exp); - - // There's not really much else we can do apart from retry and hope that the problem goes away. The retry is delayed - // since a tight loop here is unlikely to help. - transportService.getThreadPool().scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(10), Names.SAME, new Runnable() { - @Override - public void run() { - // TODO: remove the following line once schedule method properly preserves thread context - transportService.getThreadPool().getThreadContext().markAsSystemContext(); - awaitBootstrap(bootstrapConfiguration); - } - - @Override - public String toString() { - return "retry bootstrapping with " + bootstrapConfiguration.getNodeDescriptions(); - } - }); - } + if (matchingNodes.size() > 1) { + throw new IllegalStateException("requirement [" + bootstrapRequirement + "] matches multiple nodes: " + matchingNodes); + } - @Override - public String executor() { - return Names.SAME; + for (final DiscoveryNode matchingNode : matchingNodes) { + if (selectedNodes.add(matchingNode) == false) { + throw new IllegalStateException("node [" + matchingNode + "] matches multiple requirements: " + + bootstrapRequirements.stream().filter(r -> matchesRequirement(matchingNode, r)).collect(Collectors.toList())); } + } + } - @Override - public BootstrapClusterResponse read(StreamInput in) throws IOException { - return new BootstrapClusterResponse(in); - } - }); + return Tuple.tuple(selectedNodes, unmatchedRequirements); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index 9fc408fc9479c..cc58628b53893 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -34,6 +34,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Set; @@ -188,13 +189,22 @@ String getDescription() { private String describeQuorum(VotingConfiguration votingConfiguration) { final Set nodeIds = votingConfiguration.getNodeIds(); assert nodeIds.isEmpty() == false; + final int requiredNodes = nodeIds.size() / 2 + 1; + + final Set realNodeIds = new HashSet<>(nodeIds); + realNodeIds.removeIf(ClusterBootstrapService::isBootstrapPlaceholder); + assert requiredNodes <= realNodeIds.size() : nodeIds; if (nodeIds.size() == 1) { - return "a node with id " + nodeIds; + return "a node with id " + realNodeIds; } else if (nodeIds.size() == 2) { - return "two nodes with ids " + nodeIds; + return "two nodes with ids " + realNodeIds; } else { - return "at least " + (nodeIds.size() / 2 + 1) + " nodes with ids from " + nodeIds; + if (requiredNodes < realNodeIds.size()) { + return "at least " + requiredNodes + " nodes with ids from " + realNodeIds; + } else { + return requiredNodes + " nodes with ids " + realNodeIds; + } } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 72fe2e081de74..4a018c1f78f91 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -24,7 +24,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -83,7 +82,6 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentSet; import static org.elasticsearch.discovery.DiscoverySettings.NO_MASTER_BLOCK_ID; import static org.elasticsearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; @@ -139,8 +137,6 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private JoinHelper.JoinAccumulator joinAccumulator; private Optional currentPublication = Optional.empty(); - private final Set>> discoveredNodesListeners = newConcurrentSet(); - public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSettings, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, AllocationService allocationService, MasterService masterService, Supplier persistedStateSupplier, UnicastHostsProvider unicastHostsProvider, @@ -170,7 +166,8 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.clusterApplier = clusterApplier; masterService.setClusterStateSupplier(this::getStateForMasterService); this.reconfigurator = new Reconfigurator(settings, clusterSettings); - this.clusterBootstrapService = new ClusterBootstrapService(settings, transportService); + this.clusterBootstrapService = new ClusterBootstrapService(settings, transportService, this::getFoundPeers, + this::isInitialConfigurationSet, this::setInitialConfiguration); this.discoveryUpgradeService = new DiscoveryUpgradeService(settings, clusterSettings, transportService, this::isInitialConfigurationSet, joinHelper, peerFinder::getFoundPeers, this::setInitialConfiguration); this.lagDetector = new LagDetector(settings, transportService.getThreadPool(), n -> removeNode(n, "lagging"), @@ -296,12 +293,6 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { becomeFollower("handlePublishRequest", sourceNode); // also updates preVoteCollector } - if (isInitialConfigurationSet()) { - for (final ActionListener> discoveredNodesListener : discoveredNodesListeners) { - discoveredNodesListener.onFailure(new ClusterAlreadyBootstrappedException()); - } - } - return new PublishWithJoinResponse(publishResponse, joinWithDestination(lastJoin, sourceNode, publishRequest.getAcceptedState().term())); } @@ -357,6 +348,12 @@ private void startElection() { // The preVoteCollector is only active while we are candidate, but it does not call this method with synchronisation, so we have // to check our mode again here. if (mode == Mode.CANDIDATE) { + if (electionQuorumContainsLocalNode(getLastAcceptedState()) == false) { + logger.trace("skip election as local node is not part of election quorum: {}", + getLastAcceptedState().coordinationMetaData()); + return; + } + final StartJoinRequest startJoinRequest = new StartJoinRequest(getLocalNode(), Math.max(getCurrentTerm(), maxTermSeen) + 1); logger.debug("starting election with {}", startJoinRequest); @@ -369,6 +366,13 @@ private void startElection() { } } + private static boolean electionQuorumContainsLocalNode(ClusterState lastAcceptedState) { + final String localNodeId = lastAcceptedState.nodes().getLocalNodeId(); + assert localNodeId != null; + return lastAcceptedState.getLastCommittedConfiguration().getNodeIds().contains(localNodeId) + || lastAcceptedState.getLastAcceptedConfiguration().getNodeIds().contains(localNodeId); + } + private Optional ensureTermAtLeast(DiscoveryNode sourceNode, long targetTerm) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; if (getCurrentTerm() < targetTerm) { @@ -455,7 +459,7 @@ void becomeCandidate(String method) { if (mode != Mode.CANDIDATE) { mode = Mode.CANDIDATE; - cancelActivePublication(); + cancelActivePublication("become candidate: " + method); joinAccumulator.close(mode); joinAccumulator = joinHelper.new CandidateJoinAccumulator(); @@ -527,7 +531,7 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { discoveryUpgradeService.deactivate(); clusterFormationFailureHelper.stop(); closePrevotingAndElectionScheduler(); - cancelActivePublication(); + cancelActivePublication("become follower: " + method); preVoteCollector.update(getPreVoteResponse(), leaderNode); if (restartLeaderChecker) { @@ -598,16 +602,12 @@ public void startInitialJoin() { synchronized (mutex) { becomeCandidate("startInitialJoin"); } - - if (isInitialConfigurationSet() == false) { - clusterBootstrapService.start(); - } + clusterBootstrapService.scheduleUnconfiguredBootstrap(); } @Override protected void doStop() { configuredHostsResolver.stop(); - clusterBootstrapService.stop(); } @Override @@ -706,21 +706,6 @@ public boolean isInitialConfigurationSet() { return getStateForMasterService().getLastAcceptedConfiguration().isEmpty() == false; } - /** - * Sets the initial configuration by resolving the given {@link BootstrapConfiguration} to concrete nodes. This method is safe to call - * more than once, as long as each call's bootstrap configuration resolves to the same set of nodes. - * - * @param bootstrapConfiguration A description of the nodes that should form the initial configuration. - * @return whether this call successfully set the initial configuration - if false, the cluster has already been bootstrapped. - */ - public boolean setInitialConfiguration(final BootstrapConfiguration bootstrapConfiguration) { - final List selfAndDiscoveredPeers = new ArrayList<>(); - selfAndDiscoveredPeers.add(getLocalNode()); - getFoundPeers().forEach(selfAndDiscoveredPeers::add); - final VotingConfiguration votingConfiguration = bootstrapConfiguration.resolve(selfAndDiscoveredPeers); - return setInitialConfiguration(votingConfiguration); - } - /** * Sets the initial configuration to the given {@link VotingConfiguration}. This method is safe to call * more than once, as long as the argument to each call is the same. @@ -733,17 +718,28 @@ public boolean setInitialConfiguration(final VotingConfiguration votingConfigura final ClusterState currentState = getStateForMasterService(); if (isInitialConfigurationSet()) { + logger.debug("initial configuration already set, ignoring {}", votingConfiguration); return false; } - if (mode != Mode.CANDIDATE) { - throw new CoordinationStateRejectedException("Cannot set initial configuration in mode " + mode); + if (getLocalNode().isMasterNode() == false) { + logger.debug("skip setting initial configuration as local node is not a master-eligible node"); + throw new CoordinationStateRejectedException( + "this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node"); + } + + if (votingConfiguration.getNodeIds().contains(getLocalNode().getId()) == false) { + logger.debug("skip setting initial configuration as local node is not part of initial configuration"); + throw new CoordinationStateRejectedException("local node is not part of initial configuration"); } final List knownNodes = new ArrayList<>(); knownNodes.add(getLocalNode()); peerFinder.getFoundPeers().forEach(knownNodes::add); + if (votingConfiguration.hasQuorum(knownNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toList())) == false) { + logger.debug("skip setting initial configuration as not enough nodes discovered to form a quorum in the " + + "initial configuration [knownNodes={}, {}]", knownNodes, votingConfiguration); throw new CoordinationStateRejectedException("not enough nodes discovered to form a quorum in the initial configuration " + "[knownNodes=" + knownNodes + ", " + votingConfiguration + "]"); } @@ -760,6 +756,8 @@ public boolean setInitialConfiguration(final VotingConfiguration votingConfigura metaDataBuilder.coordinationMetaData(coordinationMetaData); coordinationState.get().setInitialState(ClusterState.builder(currentState).metaData(metaDataBuilder).build()); + assert electionQuorumContainsLocalNode(getLastAcceptedState()) : + "initial state does not have local node in its election quorum: " + getLastAcceptedState().coordinationMetaData(); preVoteCollector.update(getPreVoteResponse(), null); // pick up the change to last-accepted version startElectionScheduler(); return true; @@ -899,8 +897,6 @@ private ClusterState clusterStateWithNoMasterBlock(ClusterState clusterState) { public void publish(ClusterChangedEvent clusterChangedEvent, ActionListener publishListener, AckListener ackListener) { try { synchronized (mutex) { - assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; - if (mode != Mode.LEADER) { logger.debug(() -> new ParameterizedMessage("[{}] failed publication as not currently leading", clusterChangedEvent.source())); @@ -935,7 +931,7 @@ assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId()) @Override public void run() { synchronized (mutex) { - publication.onTimeout(); + publication.cancel("timed out after " + publishTimeout); } } @@ -991,10 +987,10 @@ public void onFailure(Exception e) { }; } - private void cancelActivePublication() { + private void cancelActivePublication(String reason) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; if (currentPublication.isPresent()) { - currentPublication.get().onTimeout(); + currentPublication.get().cancel(reason); } } @@ -1019,9 +1015,8 @@ protected void onActiveMasterFound(DiscoveryNode masterNode, long term) { @Override protected void onFoundPeersUpdated() { - final Iterable foundPeers; synchronized (mutex) { - foundPeers = getFoundPeers(); + final Iterable foundPeers = getFoundPeers(); if (mode == Mode.CANDIDATE) { final CoordinationState.VoteCollection expectedVotes = new CoordinationState.VoteCollection(); foundPeers.forEach(expectedVotes::addVote); @@ -1039,9 +1034,7 @@ protected void onFoundPeersUpdated() { } } - for (final ActionListener> discoveredNodesListener : discoveredNodesListeners) { - discoveredNodesListener.onResponse(foundPeers); - } + clusterBootstrapService.onFoundPeersUpdated(); } } @@ -1058,12 +1051,20 @@ private void startElectionScheduler() { public void run() { synchronized (mutex) { if (mode == Mode.CANDIDATE) { + final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState(); + + if (electionQuorumContainsLocalNode(lastAcceptedState) == false) { + logger.trace("skip prevoting as local node is not part of election quorum: {}", + lastAcceptedState.coordinationMetaData()); + return; + } + if (prevotingRound != null) { prevotingRound.close(); } - final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState(); final List discoveredNodes = getDiscoveredNodes().stream().filter(n -> isZen1Node(n) == false).collect(Collectors.toList()); + prevotingRound = preVoteCollector.start(lastAcceptedState, discoveredNodes); } } @@ -1076,14 +1077,6 @@ public String toString() { }); } - public Releasable withDiscoveryListener(ActionListener> listener) { - discoveredNodesListeners.add(listener); - return () -> { - boolean removed = discoveredNodesListeners.remove(listener); - assert removed : listener; - }; - } - public Iterable getFoundPeers() { // TODO everyone takes this and adds the local node. Maybe just add the local node here? return peerFinder.getFoundPeers(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java index b7f99df31c018..641f3941cb3bd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java @@ -135,7 +135,7 @@ public void setCurrentNodes(DiscoveryNodes discoveryNodes) { followerCheckers.keySet().removeIf(isUnknownNode); faultyNodes.removeIf(isUnknownNode); - for (final DiscoveryNode discoveryNode : discoveryNodes) { + discoveryNodes.mastersFirstStream().forEach(discoveryNode -> { if (discoveryNode.equals(discoveryNodes.getLocalNode()) == false && followerCheckers.containsKey(discoveryNode) == false && faultyNodes.contains(discoveryNode) == false) { @@ -144,7 +144,7 @@ public void setCurrentNodes(DiscoveryNodes discoveryNodes) { followerCheckers.put(discoveryNode, followerChecker); followerChecker.start(); } - } + }); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java index c4c76d8a8fe74..b754e50a945c1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.ArrayList; import java.util.Collection; @@ -187,7 +188,8 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState .blocks(currentState.blocks()) .removeGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID)).build(); logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes()); - return ClusterState.builder(allocationService.deassociateDeadNodes(tmpState, false, "removed dead nodes on election")); + tmpState = PersistentTasksCustomMetaData.disassociateDeadNodes(tmpState); + return ClusterState.builder(allocationService.disassociateDeadNodes(tmpState, false, "removed dead nodes on election")); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java index 58d4b04444b2f..bbfe64988c7cf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.List; @@ -91,8 +92,9 @@ public ClusterTasksResult execute(final ClusterState currentState, final L protected ClusterTasksResult getTaskClusterTasksResult(ClusterState currentState, List tasks, ClusterState remainingNodesClusterState) { + ClusterState ptasksDisassociatedState = PersistentTasksCustomMetaData.disassociateDeadNodes(remainingNodesClusterState); final ClusterTasksResult.Builder resultBuilder = ClusterTasksResult.builder().successes(tasks); - return resultBuilder.build(allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks))); + return resultBuilder.build(allocationService.disassociateDeadNodes(ptasksDisassociatedState, true, describeTasks(tasks))); } // visible for testing diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java index 9ec8d562b81ba..4aea820d6d9e0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java @@ -49,7 +49,7 @@ public abstract class Publication { private Optional applyCommitRequest; // set when state is committed private boolean isCompleted; // set when publication is completed - private boolean timedOut; // set when publication timed out + private boolean cancelled; // set when publication is cancelled public Publication(PublishRequest publishRequest, AckListener ackListener, LongSupplier currentTimeSupplier) { this.publishRequest = publishRequest; @@ -58,7 +58,7 @@ public Publication(PublishRequest publishRequest, AckListener ackListener, LongS startTime = currentTimeSupplier.getAsLong(); applyCommitRequest = Optional.empty(); publicationTargets = new ArrayList<>(publishRequest.getAcceptedState().getNodes().getNodes().size()); - publishRequest.getAcceptedState().getNodes().iterator().forEachRemaining(n -> publicationTargets.add(new PublicationTarget(n))); + publishRequest.getAcceptedState().getNodes().mastersFirstStream().forEach(n -> publicationTargets.add(new PublicationTarget(n))); } public void start(Set faultyNodes) { @@ -71,17 +71,17 @@ public void start(Set faultyNodes) { publicationTargets.forEach(PublicationTarget::sendPublishRequest); } - public void onTimeout() { + public void cancel(String reason) { if (isCompleted) { return; } - assert timedOut == false; - timedOut = true; + assert cancelled == false; + cancelled = true; if (applyCommitRequest.isPresent() == false) { - logger.debug("onTimeout: [{}] timed out before committing", this); + logger.debug("cancel: [{}] cancelled before committing (reason: {})", this, reason); // fail all current publications - final Exception e = new ElasticsearchException("publication timed out before committing"); + final Exception e = new ElasticsearchException("publication cancelled before committing: " + reason); publicationTargets.stream().filter(PublicationTarget::isActive).forEach(pt -> pt.setFailed(e)); } onPossibleCompletion(); @@ -101,7 +101,7 @@ private void onPossibleCompletion() { return; } - if (timedOut == false) { + if (cancelled == false) { for (final PublicationTarget target : publicationTargets) { if (target.isActive()) { return; @@ -125,8 +125,8 @@ private void onPossibleCompletion() { } // For assertions only: verify that this invariant holds - private boolean publicationCompletedIffAllTargetsInactiveOrTimedOut() { - if (timedOut == false) { + private boolean publicationCompletedIffAllTargetsInactiveOrCancelled() { + if (cancelled == false) { for (final PublicationTarget target : publicationTargets) { if (target.isActive()) { return isCompleted == false; @@ -222,7 +222,7 @@ void sendPublishRequest() { state = PublicationTargetState.SENT_PUBLISH_REQUEST; Publication.this.sendPublishRequest(discoveryNode, publishRequest, new PublishResponseHandler()); // TODO Can this ^ fail with an exception? Target should be failed if so. - assert publicationCompletedIffAllTargetsInactiveOrTimedOut(); + assert publicationCompletedIffAllTargetsInactiveOrCancelled(); } void handlePublishResponse(PublishResponse publishResponse) { @@ -245,7 +245,7 @@ void sendApplyCommit() { state = PublicationTargetState.SENT_APPLY_COMMIT; assert applyCommitRequest.isPresent(); Publication.this.sendApplyCommit(discoveryNode, applyCommitRequest.get(), new ApplyCommitResponseHandler()); - assert publicationCompletedIffAllTargetsInactiveOrTimedOut(); + assert publicationCompletedIffAllTargetsInactiveOrCancelled(); } void setAppliedCommit() { @@ -300,7 +300,7 @@ private class PublishResponseHandler implements ActionListener {}, new ParseField(DELETE_DATE_KEY)); } - static final DateFormatter FORMATTER = DateFormatters.forPattern("strict_date_optional_time").withZone(ZoneOffset.UTC); + static final DateFormatter FORMATTER = DateFormatter.forPattern("strict_date_optional_time").withZone(ZoneOffset.UTC); static ContextParser getParser() { return (parser, context) -> TOMBSTONE_PARSER.apply(parser, null).build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index f60866383107a..050d97ba54cf0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -28,8 +28,8 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateMathParser; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -819,7 +819,7 @@ private static List resolveEmptyOrTrivialWildcard(IndicesOptions options static final class DateMathExpressionResolver implements ExpressionResolver { - private static final DateFormatter DEFAULT_DATE_FORMATTER = DateFormatters.forPattern("uuuu.MM.dd"); + private static final DateFormatter DEFAULT_DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd"); private static final String EXPRESSION_LEFT_BOUND = "<"; private static final String EXPRESSION_RIGHT_BOUND = ">"; private static final char LEFT_BOUND = '{'; @@ -912,18 +912,19 @@ String resolveExpression(String expression, final Context context) { int formatPatternTimeZoneSeparatorIndex = patternAndTZid.indexOf(TIME_ZONE_BOUND); if (formatPatternTimeZoneSeparatorIndex != -1) { dateFormatterPattern = patternAndTZid.substring(0, formatPatternTimeZoneSeparatorIndex); - timeZone = ZoneId.of(patternAndTZid.substring(formatPatternTimeZoneSeparatorIndex + 1)); + timeZone = DateUtils.of(patternAndTZid.substring(formatPatternTimeZoneSeparatorIndex + 1)); } else { dateFormatterPattern = patternAndTZid; timeZone = ZoneOffset.UTC; } - dateFormatter = DateFormatters.forPattern(dateFormatterPattern); + dateFormatter = DateFormatter.forPattern(dateFormatterPattern); } + DateFormatter formatter = dateFormatter.withZone(timeZone); DateMathParser dateMathParser = formatter.toDateMathParser(); - long millis = dateMathParser.parse(mathExpression, context::getStartTime, false, timeZone); + Instant instant = dateMathParser.parse(mathExpression, context::getStartTime, false, timeZone); - String time = formatter.format(Instant.ofEpochMilli(millis)); + String time = formatter.format(instant); beforePlaceHolderSb.append(time); inPlaceHolderSb = new StringBuilder(); inPlaceHolder = false; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index e21aa95865a6f..e66b55b1a7ed6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -189,7 +189,7 @@ public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException Builder builder = new Builder(in.readString()); builder.order(in.readInt()); if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - builder.patterns(in.readList(StreamInput::readString)); + builder.patterns(in.readStringList()); } else { builder.patterns(Collections.singletonList(in.readString())); } @@ -224,7 +224,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeInt(order); if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - out.writeStringList(patterns); + out.writeStringCollection(patterns); } else { out.writeString(patterns.get(0)); } diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 2e0e63186917b..02f4d5d93bf4e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -40,6 +40,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Stream; import java.util.stream.StreamSupport; /** @@ -161,6 +162,14 @@ public ImmutableOpenMap getCoordinatingOnlyNodes() { return nodes.build(); } + /** + * Returns a stream of all nodes, with master nodes at the front + */ + public Stream mastersFirstStream() { + return Stream.concat(StreamSupport.stream(masterNodes.spliterator(), false).map(cur -> cur.value), + StreamSupport.stream(this.spliterator(), false).filter(n -> n.isMasterNode() == false)); + } + /** * Get a node by its id * diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java index 9349de94a66e2..c9cc5b628e292 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java @@ -45,7 +45,7 @@ * the delay marker). These are shards that have become unassigned due to a node leaving * and which were assigned the delay marker based on the index delay setting * {@link UnassignedInfo#INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING} - * (see {@link AllocationService#deassociateDeadNodes(RoutingAllocation)}). + * (see {@link AllocationService#disassociateDeadNodes(RoutingAllocation)}. * This class is responsible for choosing the next (closest) delay expiration of a * delayed shard to schedule a reroute to remove the delay marker. * The actual removal of the delay marker happens in diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 7e8fdb1edaca2..af617dccf6734 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -327,7 +327,7 @@ public ShardRouting activePrimary(ShardId shardId) { * */ public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) { - // It's possible for replicaNodeVersion to be null, when deassociating dead nodes + // It's possible for replicaNodeVersion to be null, when disassociating dead nodes // that have been removed, the shards are failed, and part of the shard failing // calls this method with an out-of-date RoutingNodes, where the version might not // be accessible. Therefore, we need to protect against the version being null diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 57f7a313942bb..89e19e02b30ed 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -77,11 +77,6 @@ protected void doClose() { * Initiates a reroute. */ public final void reroute(String reason) { - performReroute(reason); - } - - // visible for testing - protected void performReroute(String reason) { try { if (lifecycle.stopped()) { return; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 21885d1788c7e..f8afbeb449361 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -48,7 +47,7 @@ */ public final class UnassignedInfo implements ToXContentFragment, Writeable { - public static final DateFormatter DATE_TIME_FORMATTER = DateFormatters.forPattern("dateOptionalTime").withZone(ZoneOffset.UTC); + public static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("dateOptionalTime").withZone(ZoneOffset.UTC); public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.positiveTimeSetting("index.unassigned.node_left.delayed_timeout", TimeValue.timeValueMinutes(1), Property.Dynamic, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 59f43a193ddc8..fd3fb8edd5e17 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -216,7 +216,7 @@ public ClusterState applyFailedShards(final ClusterState clusterState, final Lis * unassigned an shards that are associated with nodes that are no longer part of the cluster, potentially promoting replicas * if needed. */ - public ClusterState deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) { + public ClusterState disassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) { RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); // shuffle the unassigned nodes, just so we won't have things like poison failed shards routingNodes.unassigned().shuffle(); @@ -224,7 +224,7 @@ public ClusterState deassociateDeadNodes(ClusterState clusterState, boolean rero clusterInfoService.getClusterInfo(), currentNanoTime()); // first, clear from the shards any node id they used to belong to that is now dead - deassociateDeadNodes(allocation); + disassociateDeadNodes(allocation); if (allocation.routingNodesChanged()) { clusterState = buildResult(clusterState, allocation); @@ -400,7 +400,7 @@ private boolean hasDeadNodes(RoutingAllocation allocation) { } private void reroute(RoutingAllocation allocation) { - assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See deassociateDeadNodes"; + assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See disassociateDeadNodes"; assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metaData(), allocation.nodes()).isEmpty() : "auto-expand replicas out of sync with number of nodes in the cluster"; @@ -414,7 +414,7 @@ private void reroute(RoutingAllocation allocation) { assert RoutingNodes.assertShardStats(allocation.routingNodes()); } - private void deassociateDeadNodes(RoutingAllocation allocation) { + private void disassociateDeadNodes(RoutingAllocation allocation) { for (Iterator it = allocation.routingNodes().mutableIterator(); it.hasNext(); ) { RoutingNode node = it.next(); if (allocation.nodes().getDataNodes().containsKey(node.nodeId())) { @@ -425,7 +425,7 @@ private void deassociateDeadNodes(RoutingAllocation allocation) { for (ShardRouting shardRouting : node.copyShards()) { final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0; - UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]", + UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left [" + node.nodeId() + "]", null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed, AllocationStatus.NO_ATTEMPT); allocation.routingNodes().failShard(logger, shardRouting, unassignedInfo, indexMetaData, allocation.changes()); } diff --git a/server/src/main/java/org/elasticsearch/common/Rounding.java b/server/src/main/java/org/elasticsearch/common/Rounding.java index 593964f61e93f..dab29c88634e9 100644 --- a/server/src/main/java/org/elasticsearch/common/Rounding.java +++ b/server/src/main/java/org/elasticsearch/common/Rounding.java @@ -19,9 +19,11 @@ package org.elasticsearch.common; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; @@ -188,7 +190,7 @@ static class TimeUnitRounding extends Rounding { TimeUnitRounding(StreamInput in) throws IOException { unit = DateTimeUnit.resolve(in.readByte()); - timeZone = ZoneId.of(in.readString()); + timeZone = DateUtils.of(in.readString()); unitRoundsToMidnight = unit.getField().getBaseUnit().getDuration().toMillis() > 60L * 60L * 1000L; } @@ -367,8 +369,11 @@ public long nextRoundingValue(long utcMillis) { @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeByte(unit.getId()); - String tz = ZoneOffset.UTC.equals(timeZone) ? "UTC" : timeZone.getId(); // stay joda compatible - out.writeString(tz); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeString(timeZone.getId()); + } else { + out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); + } } @Override @@ -417,7 +422,7 @@ public String toString() { TimeIntervalRounding(StreamInput in) throws IOException { interval = in.readVLong(); - timeZone = ZoneId.of(in.readString()); + timeZone = DateUtils.of(in.readString()); } @Override @@ -490,8 +495,11 @@ public long nextRoundingValue(long time) { @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeVLong(interval); - String tz = ZoneOffset.UTC.equals(timeZone) ? "UTC" : timeZone.getId(); // stay joda compatible - out.writeString(tz); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeString(timeZone.getId()); + } else { + out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/common/Table.java b/server/src/main/java/org/elasticsearch/common/Table.java index a41fd267329ff..d097783a838f3 100644 --- a/server/src/main/java/org/elasticsearch/common/Table.java +++ b/server/src/main/java/org/elasticsearch/common/Table.java @@ -20,7 +20,6 @@ package org.elasticsearch.common; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import java.time.Instant; import java.time.ZoneOffset; @@ -85,7 +84,7 @@ public Table endHeaders() { return this; } - private static final DateFormatter FORMATTER = DateFormatters.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); + private static final DateFormatter FORMATTER = DateFormatter.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); public Table startRow() { if (headers.isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index fd9ffdfd31d16..723de8fd5da31 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -653,6 +653,23 @@ public DateTimeZone readOptionalTimeZone() throws IOException { return null; } + /** + * Read a {@linkplain DateTimeZone}. + */ + public ZoneId readZoneId() throws IOException { + return ZoneId.of(readString()); + } + + /** + * Read an optional {@linkplain ZoneId}. + */ + public ZoneId readOptionalZoneId() throws IOException { + if (readBoolean()) { + return ZoneId.of(readString()); + } + return null; + } + public int[] readIntArray() throws IOException { int length = readArraySize(); int[] values = new int[length]; @@ -929,12 +946,26 @@ public List readStreamableList(Supplier constructor } /** - * Reads a list of objects + * Reads a list of objects. The list is expected to have been written using {@link StreamOutput#writeList(List)} or + * {@link StreamOutput#writeStreamableList(List)}. + * + * @return the list of objects + * @throws IOException if an I/O exception occurs reading the list */ - public List readList(Writeable.Reader reader) throws IOException { + public List readList(final Writeable.Reader reader) throws IOException { return readCollection(reader, ArrayList::new); } + /** + * Reads a list of strings. The list is expected to have been written using {@link StreamOutput#writeStringCollection(Collection)}. + * + * @return the list of strings + * @throws IOException if an I/O exception occurs reading the list + */ + public List readStringList() throws IOException { + return readList(StreamInput::readString); + } + /** * Reads a set of objects */ diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 8131335602693..e9709de1a44a3 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -55,6 +55,7 @@ import java.nio.file.FileSystemLoopException; import java.nio.file.NoSuchFileException; import java.nio.file.NotDirectoryException; +import java.time.ZoneId; import java.time.ZonedDateTime; import java.util.Collection; import java.util.Collections; @@ -677,7 +678,6 @@ public final void writeMap(final Map map, final Writer keyWriter writers.put(ZonedDateTime.class, (o, v) -> { o.writeByte((byte) 23); final ZonedDateTime zonedDateTime = (ZonedDateTime) v; - zonedDateTime.getZone().getId(); o.writeString(zonedDateTime.getZone().getId()); o.writeLong(zonedDateTime.toInstant().toEpochMilli()); }); @@ -988,6 +988,13 @@ public void writeTimeZone(DateTimeZone timeZone) throws IOException { writeString(timeZone.getID()); } + /** + * Write a {@linkplain ZoneId} to the stream. + */ + public void writeZoneId(ZoneId timeZone) throws IOException { + writeString(timeZone.getId()); + } + /** * Write an optional {@linkplain DateTimeZone} to the stream. */ @@ -1000,6 +1007,18 @@ public void writeOptionalTimeZone(@Nullable DateTimeZone timeZone) throws IOExce } } + /** + * Write an optional {@linkplain ZoneId} to the stream. + */ + public void writeOptionalZoneId(@Nullable ZoneId timeZone) throws IOException { + if (timeZone == null) { + writeBoolean(false); + } else { + writeBoolean(true); + writeZoneId(timeZone); + } + } + /** * Writes a list of {@link Streamable} objects */ @@ -1029,23 +1048,27 @@ public void writeList(List list) throws IOException { } /** - * Writes a collection of generic objects via a {@link Writer} + * Writes a collection of objects via a {@link Writer}. + * + * @param collection the collection of objects + * @throws IOException if an I/O exception occurs writing the collection */ - public void writeCollection(Collection collection, Writer writer) throws IOException { + public void writeCollection(final Collection collection, final Writer writer) throws IOException { writeVInt(collection.size()); - for (T val: collection) { + for (final T val: collection) { writer.write(this, val); } } /** - * Writes a list of strings + * Writes a collection of a strings. The corresponding collection can be read from a stream input using + * {@link StreamInput#readList(Writeable.Reader)}. + * + * @param collection the collection of strings + * @throws IOException if an I/O exception occurs writing the collection */ - public void writeStringList(List list) throws IOException { - writeVInt(list.size()); - for (String string: list) { - this.writeString(string); - } + public void writeStringCollection(final Collection collection) throws IOException { + writeCollection(collection, StreamOutput::writeString); } /** diff --git a/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java index 5db95b12bb437..706e995530962 100644 --- a/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java @@ -31,12 +31,12 @@ import java.time.ZonedDateTime; import java.time.temporal.TemporalAccessor; import java.util.Locale; +import java.util.Objects; public class JodaDateFormatter implements DateFormatter { - final String pattern; + final String pattern; final DateTimeFormatter parser; - final DateTimeFormatter printer; public JodaDateFormatter(String pattern, DateTimeFormatter parser, DateTimeFormatter printer) { @@ -108,4 +108,21 @@ public ZoneId zone() { public DateMathParser toDateMathParser() { return new JodaDateMathParser(this); } + + @Override + public int hashCode() { + return Objects.hash(locale(), zone(), pattern()); + } + + @Override + public boolean equals(Object obj) { + if (obj.getClass().equals(this.getClass()) == false) { + return false; + } + JodaDateFormatter other = (JodaDateFormatter) obj; + + return Objects.equals(pattern(), other.pattern()) && + Objects.equals(locale(), other.locale()) && + Objects.equals(zone(), other.zone()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/joda/JodaDateMathParser.java b/server/src/main/java/org/elasticsearch/common/joda/JodaDateMathParser.java index b86af7a75a55f..b7522c6a3233e 100644 --- a/server/src/main/java/org/elasticsearch/common/joda/JodaDateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/joda/JodaDateMathParser.java @@ -26,6 +26,7 @@ import org.joda.time.MutableDateTime; import org.joda.time.format.DateTimeFormatter; +import java.time.Instant; import java.time.ZoneId; import java.util.Objects; import java.util.function.LongSupplier; @@ -50,7 +51,7 @@ public JodaDateMathParser(JodaDateFormatter dateTimeFormatter) { // if it has been used. For instance, the request cache does not cache requests that make // use of `now`. @Override - public long parse(String text, LongSupplier now, boolean roundUp, ZoneId tz) { + public Instant parse(String text, LongSupplier now, boolean roundUp, ZoneId tz) { final DateTimeZone timeZone = tz == null ? null : DateUtils.zoneIdToDateTimeZone(tz); long time; String mathString; @@ -64,13 +65,13 @@ public long parse(String text, LongSupplier now, boolean roundUp, ZoneId tz) { } else { int index = text.indexOf("||"); if (index == -1) { - return parseDateTime(text, timeZone, roundUp); + return Instant.ofEpochMilli(parseDateTime(text, timeZone, roundUp)); } time = parseDateTime(text.substring(0, index), timeZone, false); mathString = text.substring(index + 2); } - return parseMath(mathString, time, roundUp, timeZone); + return Instant.ofEpochMilli(parseMath(mathString, time, roundUp, timeZone)); } private long parseMath(String mathString, long time, boolean roundUp, DateTimeZone timeZone) throws ElasticsearchParseException { diff --git a/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index 1eb3d52b46cde..0c77271c7ed0f 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -29,14 +29,8 @@ import java.nio.charset.Charset; import java.security.AccessController; import java.security.PrivilegedAction; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.time.format.DateTimeFormatterBuilder; -import java.time.format.SignStyle; import java.util.BitSet; import java.util.Collections; -import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Locale; @@ -47,14 +41,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import static java.time.temporal.ChronoField.DAY_OF_MONTH; -import static java.time.temporal.ChronoField.DAY_OF_WEEK; -import static java.time.temporal.ChronoField.HOUR_OF_DAY; -import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; -import static java.time.temporal.ChronoField.MONTH_OF_YEAR; -import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; -import static java.time.temporal.ChronoField.YEAR; - /** * A logger that logs deprecation notices. */ @@ -165,73 +151,14 @@ public void deprecatedAndMaybeLog(final String key, final String msg, final Obje Build.CURRENT.isSnapshot() ? "-SNAPSHOT" : "", Build.CURRENT.shortHash()); - /* - * RFC 7234 section 5.5 specifies that the warn-date is a quoted HTTP-date. HTTP-date is defined in RFC 7234 Appendix B as being from - * RFC 7231 section 7.1.1.1. RFC 7231 specifies an HTTP-date as an IMF-fixdate (or an obs-date referring to obsolete formats). The - * grammar for IMF-fixdate is specified as 'day-name "," SP date1 SP time-of-day SP GMT'. Here, day-name is - * (Mon|Tue|Wed|Thu|Fri|Sat|Sun). Then, date1 is 'day SP month SP year' where day is 2DIGIT, month is - * (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec), and year is 4DIGIT. Lastly, time-of-day is 'hour ":" minute ":" second' where - * hour is 2DIGIT, minute is 2DIGIT, and second is 2DIGIT. Finally, 2DIGIT and 4DIGIT have the obvious definitions. - */ - private static final DateTimeFormatter RFC_7231_DATE_TIME; - - static { - final Map dow = new HashMap<>(); - dow.put(1L, "Mon"); - dow.put(2L, "Tue"); - dow.put(3L, "Wed"); - dow.put(4L, "Thu"); - dow.put(5L, "Fri"); - dow.put(6L, "Sat"); - dow.put(7L, "Sun"); - final Map moy = new HashMap<>(); - moy.put(1L, "Jan"); - moy.put(2L, "Feb"); - moy.put(3L, "Mar"); - moy.put(4L, "Apr"); - moy.put(5L, "May"); - moy.put(6L, "Jun"); - moy.put(7L, "Jul"); - moy.put(8L, "Aug"); - moy.put(9L, "Sep"); - moy.put(10L, "Oct"); - moy.put(11L, "Nov"); - moy.put(12L, "Dec"); - RFC_7231_DATE_TIME = new DateTimeFormatterBuilder() - .parseCaseInsensitive() - .parseLenient() - .optionalStart() - .appendText(DAY_OF_WEEK, dow) - .appendLiteral(", ") - .optionalEnd() - .appendValue(DAY_OF_MONTH, 2, 2, SignStyle.NOT_NEGATIVE) - .appendLiteral(' ') - .appendText(MONTH_OF_YEAR, moy) - .appendLiteral(' ') - .appendValue(YEAR, 4) - .appendLiteral(' ') - .appendValue(HOUR_OF_DAY, 2) - .appendLiteral(':') - .appendValue(MINUTE_OF_HOUR, 2) - .optionalStart() - .appendLiteral(':') - .appendValue(SECOND_OF_MINUTE, 2) - .optionalEnd() - .appendLiteral(' ') - .appendOffset("+HHMM", "GMT") - .toFormatter(Locale.getDefault(Locale.Category.FORMAT)); - } - - private static final String STARTUP_TIME = RFC_7231_DATE_TIME.format(ZonedDateTime.now(ZoneId.of("GMT"))); - /** * Regular expression to test if a string matches the RFC7234 specification for warning headers. This pattern assumes that the warn code * is always 299. Further, this pattern assumes that the warn agent represents a version of Elasticsearch including the build hash. */ - public static Pattern WARNING_HEADER_PATTERN = Pattern.compile( + public static final Pattern WARNING_HEADER_PATTERN = Pattern.compile( "299 " + // warn code "Elasticsearch-\\d+\\.\\d+\\.\\d+(?:-(?:alpha|beta|rc)\\d+)?(?:-SNAPSHOT)?-(?:[a-f0-9]{7}|Unknown) " + // warn agent - "\"((?:\t| |!|[\\x23-\\x5B]|[\\x5D-\\x7E]|[\\x80-\\xFF]|\\\\|\\\\\")*)\" " + // quoted warning value, captured + "\"((?:\t| |!|[\\x23-\\x5B]|[\\x5D-\\x7E]|[\\x80-\\xFF]|\\\\|\\\\\")*)\"( " + // quoted warning value, captured // quoted RFC 1123 date format "\"" + // opening quote "(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // weekday @@ -240,12 +167,11 @@ public void deprecatedAndMaybeLog(final String key, final String msg, final Obje "\\d{4} " + // 4-digit year "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second) "GMT" + // GMT - "\""); // closing quote + "\")?"); // closing quote (optional, since an older version can still send a warn-date) /** * Extracts the warning value from the value of a warning header that is formatted according to RFC 7234. That is, given a string - * {@code 299 Elasticsearch-6.0.0 "warning value" "Sat, 25 Feb 2017 10:27:43 GMT"}, the return value of this method would be {@code - * warning value}. + * {@code 299 Elasticsearch-6.0.0 "warning value"}, the return value of this method would be {@code warning value}. * * @param s the value of a warning header formatted according to RFC 7234. * @return the extracted warning value @@ -253,23 +179,19 @@ public void deprecatedAndMaybeLog(final String key, final String msg, final Obje public static String extractWarningValueFromWarningHeader(final String s) { /* * We know the exact format of the warning header, so to extract the warning value we can skip forward from the front to the first - * quote, and skip backwards from the end to the penultimate quote: + * quote and we know the last quote is at the end of the string * - * 299 Elasticsearch-6.0.0 "warning value" "Sat, 25, Feb 2017 10:27:43 GMT" - * ^ ^ ^ - * firstQuote penultimateQuote lastQuote - * - * We do it this way rather than seeking forward after the first quote because there could be escaped quotes in the warning value - * but since there are none in the warning date, we can skip backwards to find the quote that closes the quoted warning value. + * 299 Elasticsearch-6.0.0 "warning value" + * ^ ^ + * firstQuote lastQuote * * We parse this manually rather than using the capturing regular expression because the regular expression involves a lot of * backtracking and carries a performance penalty. However, when assertions are enabled, we still use the regular expression to * verify that we are maintaining the warning header format. */ final int firstQuote = s.indexOf('\"'); - final int lastQuote = s.lastIndexOf('\"'); - final int penultimateQuote = s.lastIndexOf('\"', lastQuote - 1); - final String warningValue = s.substring(firstQuote + 1, penultimateQuote - 2); + final int lastQuote = s.length() - 1; + final String warningValue = s.substring(firstQuote + 1, lastQuote); assert assertWarningValue(s, warningValue); return warningValue; } @@ -299,7 +221,6 @@ void deprecated(final Set threadContexts, final String message, f deprecated(threadContexts, message, true, params); } - void deprecated(final Set threadContexts, final String message, final boolean log, final Object... params) { final Iterator iterator = threadContexts.iterator(); @@ -338,9 +259,7 @@ public Void run() { * @return a warning value formatted according to RFC 7234 */ public static String formatWarning(final String s) { - return WARNING_PREFIX + " " - + "\"" + escapeAndEncode(s) + "\"" + " " - + "\"" + STARTUP_TIME + "\""; + return WARNING_PREFIX + " " + "\"" + escapeAndEncode(s) + "\""; } /** @@ -451,7 +370,7 @@ static String encode(final String s) { int startIndex = i; do { i++; - } while (i < s.length() && !doesNotNeedEncoding.get(s.charAt(i))); + } while (i < s.length() && doesNotNeedEncoding.get(s.charAt(i)) == false); final byte[] bytes = s.substring(startIndex, i).getBytes(UTF_8); // noinspection ForLoopReplaceableByForEach diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index b49f0f8225016..9d3e278e889f6 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -296,6 +296,54 @@ public void apply(Map> values, Settings current, Settings pr }); } + /** + * Adds a affix settings consumer that accepts the settings for a group of settings. The consumer is only + * notified if at least one of the settings change. + *

+ * Note: Only settings registered in {@link SettingsModule} can be changed dynamically. + *

+ */ + public synchronized void addAffixGroupUpdateConsumer(List> settings, BiConsumer consumer) { + List affixUpdaters = new ArrayList<>(settings.size()); + for (Setting.AffixSetting setting : settings) { + ensureSettingIsRegistered(setting); + affixUpdaters.add(setting.newAffixUpdater((a,b)-> {}, logger, (a,b)-> {})); + } + + addSettingsUpdater(new SettingUpdater>() { + + @Override + public boolean hasChanged(Settings current, Settings previous) { + return affixUpdaters.stream().anyMatch(au -> au.hasChanged(current, previous)); + } + + @Override + public Map getValue(Settings current, Settings previous) { + Set namespaces = new HashSet<>(); + for (Setting.AffixSetting setting : settings) { + SettingUpdater affixUpdaterA = setting.newAffixUpdater((k, v) -> namespaces.add(k), logger, (a, b) ->{}); + affixUpdaterA.apply(current, previous); + } + Map namespaceToSettings = new HashMap<>(namespaces.size()); + for (String namespace : namespaces) { + Set concreteSettings = new HashSet<>(settings.size()); + for (Setting.AffixSetting setting : settings) { + concreteSettings.add(setting.getConcreteSettingForNamespace(namespace).getKey()); + } + namespaceToSettings.put(namespace, current.filter(concreteSettings::contains)); + } + return namespaceToSettings; + } + + @Override + public void apply(Map values, Settings current, Settings previous) { + for (Map.Entry entry : values.entrySet()) { + consumer.accept(entry.getKey(), entry.getValue()); + } + } + }); + } + private void ensureSettingIsRegistered(Setting.AffixSetting setting) { final Setting registeredSetting = this.complexMatchers.get(setting.getKey()); if (setting != registeredSetting) { diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java index e89317ad288c0..aeea14ee1f011 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.time; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.joda.Joda; import org.joda.time.DateTime; import java.time.Instant; @@ -87,7 +86,8 @@ default DateTime parseJoda(String input) { * Return the given millis-since-epoch formatted with this format. */ default String formatMillis(long millis) { - return format(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneOffset.UTC)); + ZoneId zone = zone() != null ? zone() : ZoneOffset.UTC; + return format(Instant.ofEpochMilli(millis).atZone(zone)); } /** @@ -121,7 +121,9 @@ default String formatJoda(DateTime dateTime) { ZoneId zone(); /** - * Return a {@link DateMathParser} built from this formatter. + * Create a DateMathParser from the existing formatter + * + * @return The DateMathParser object */ DateMathParser toDateMathParser(); @@ -129,13 +131,14 @@ static DateFormatter forPattern(String input) { if (Strings.hasLength(input) == false) { throw new IllegalArgumentException("No date pattern provided"); } - if (input.startsWith("8") == false) { - return Joda.forPattern(input); + + // support the 6.x BWC compatible way of parsing java 8 dates + if (input.startsWith("8")) { + input = input.substring(1); } - // force java 8 date format List formatters = new ArrayList<>(); - for (String pattern : Strings.delimitedListToStringArray(input.substring(1), "||")) { + for (String pattern : Strings.delimitedListToStringArray(input, "||")) { if (Strings.hasLength(pattern) == false) { throw new IllegalArgumentException("Cannot have empty element in multi date format pattern: " + input); } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index d3bf5eb2a641c..2e3c2953ec375 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -46,7 +46,6 @@ import static java.time.temporal.ChronoField.DAY_OF_WEEK; import static java.time.temporal.ChronoField.DAY_OF_YEAR; import static java.time.temporal.ChronoField.HOUR_OF_DAY; -import static java.time.temporal.ChronoField.MILLI_OF_SECOND; import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; import static java.time.temporal.ChronoField.MONTH_OF_YEAR; import static java.time.temporal.ChronoField.NANO_OF_SECOND; @@ -77,20 +76,43 @@ public class DateFormatters { private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_PRINTER = new DateTimeFormatterBuilder() .append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral('T') - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - .appendFraction(NANO_OF_SECOND, 3, 9, true) + .optionalStart() + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 3, true) + .optionalEnd() + .optionalEnd() .optionalStart() .appendZoneOrOffsetId() .optionalEnd() + .optionalEnd() + .optionalEnd() .toFormatter(Locale.ROOT); private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER = new DateTimeFormatterBuilder() .append(STRICT_YEAR_MONTH_DAY_FORMATTER) .optionalStart() .appendLiteral('T') - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) .optionalStart() - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 3, true) + .optionalEnd() + .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 9, true) + .optionalEnd() .optionalEnd() .optionalStart() .appendZoneOrOffsetId() @@ -99,6 +121,8 @@ public class DateFormatters { .append(TIME_ZONE_FORMATTER_NO_COLON) .optionalEnd() .optionalEnd() + .optionalEnd() + .optionalEnd() .toFormatter(Locale.ROOT); /** @@ -124,11 +148,33 @@ public class DateFormatters { .optionalEnd() .toFormatter(Locale.ROOT); + private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS = new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral('T') + .optionalStart() + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 9, true) + .optionalEnd() + .optionalEnd() + .optionalStart() + .appendZoneOrOffsetId() + .optionalEnd() + .optionalEnd() + .optionalEnd() + .toFormatter(Locale.ROOT); + /** * Returns a generic ISO datetime parser where the date is mandatory and the time is optional with nanosecond resolution. */ private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS = new JavaDateFormatter("strict_date_optional_time_nanos", - STRICT_DATE_OPTIONAL_TIME_PRINTER, STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS); + STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS, STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS); ///////////////////////////////////////// // @@ -159,14 +205,14 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter BASIC_TIME_PRINTER = new DateTimeFormatterBuilder() .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .toFormatter(Locale.ROOT); /* @@ -330,31 +376,32 @@ public class DateFormatters { * Returns a basic formatter that combines a basic weekyear date and time * without millis, separated by a 'T' (xxxx'W'wwe'T'HHmmssX). */ - private static final DateFormatter STRICT_BASIC_WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter("strict_basic_week_date_no_millis", - new DateTimeFormatterBuilder() - .append(STRICT_BASIC_WEEK_DATE_PRINTER) - .appendLiteral("T") - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT), - new DateTimeFormatterBuilder() - .append(STRICT_BASIC_WEEK_DATE_PRINTER) - .appendLiteral("T") - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT), - new DateTimeFormatterBuilder() - .append(STRICT_BASIC_WEEK_DATE_PRINTER) - .appendLiteral("T") - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .append(TIME_ZONE_FORMATTER_NO_COLON) - .toFormatter(Locale.ROOT) + private static final DateFormatter STRICT_BASIC_WEEK_DATE_TIME_NO_MILLIS = + new JavaDateFormatter("strict_basic_week_date_time_no_millis", + new DateTimeFormatterBuilder() + .append(STRICT_BASIC_WEEK_DATE_PRINTER) + .appendLiteral("T") + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .appendZoneOrOffsetId() + .toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder() + .append(STRICT_BASIC_WEEK_DATE_PRINTER) + .appendLiteral("T") + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .appendZoneOrOffsetId() + .toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder() + .append(STRICT_BASIC_WEEK_DATE_PRINTER) + .appendLiteral("T") + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .append(TIME_ZONE_FORMATTER_NO_COLON) + .toFormatter(Locale.ROOT) ); /* @@ -372,7 +419,7 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .appendZoneOrOffsetId() .toFormatter(Locale.ROOT), new DateTimeFormatterBuilder() @@ -381,7 +428,7 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .append(TIME_ZONE_FORMATTER_NO_COLON) .toFormatter(Locale.ROOT) ); @@ -390,7 +437,7 @@ public class DateFormatters { * An ISO date formatter that formats or parses a date without an offset, such as '2011-12-03'. */ private static final DateFormatter STRICT_DATE = new JavaDateFormatter("strict_date", - DateTimeFormatter.ISO_LOCAL_DATE.withResolverStyle(ResolverStyle.LENIENT)); + DateTimeFormatter.ISO_LOCAL_DATE.withResolverStyle(ResolverStyle.LENIENT).withLocale(Locale.ROOT)); /* * A date formatter that formats or parses a date plus an hour without an offset, such as '2011-12-03T01'. @@ -438,7 +485,7 @@ public class DateFormatters { .appendLiteral('T') .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) .optionalStart() - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -495,12 +542,12 @@ public class DateFormatters { // NOTE: this is not a strict formatter to retain the joda time based behaviour, even though it's named like this private static final DateTimeFormatter STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER = new DateTimeFormatterBuilder() .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER = new DateTimeFormatterBuilder() .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .toFormatter(Locale.ROOT); /* @@ -515,7 +562,9 @@ public class DateFormatters { new JavaDateFormatter("strict_hour_minute_second_millis", STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER); - private static final DateFormatter STRICT_HOUR_MINUTE_SECOND_FRACTION = STRICT_HOUR_MINUTE_SECOND_MILLIS; + private static final DateFormatter STRICT_HOUR_MINUTE_SECOND_FRACTION = + new JavaDateFormatter("strict_hour_minute_second_fraction", + STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER); /* * Returns a formatter that combines a full date, two digit hour of day, @@ -534,11 +583,25 @@ public class DateFormatters { .appendLiteral("T") .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) // this one here is lenient as well to retain joda time based bwc compatibility - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT) ); - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION; + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( + "strict_date_hour_minute_second_millis", + new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral("T") + .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) + .toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral("T") + .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) + // this one here is lenient as well to retain joda time based bwc compatibility + .appendFraction(NANO_OF_SECOND, 1, 3, true) + .toFormatter(Locale.ROOT) + ); /* * Returns a formatter for a two digit hour of day. (HH) @@ -562,7 +625,7 @@ public class DateFormatters { .optionalStart() .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -586,7 +649,7 @@ public class DateFormatters { .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter STRICT_TIME_PRINTER = new DateTimeFormatterBuilder() @@ -595,7 +658,7 @@ public class DateFormatters { .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .toFormatter(Locale.ROOT); /* @@ -783,14 +846,12 @@ public class DateFormatters { private static final DateTimeFormatter DATE_FORMATTER = new DateTimeFormatterBuilder() .appendValue(ChronoField.YEAR, 1, 5, SignStyle.NORMAL) - .optionalStart() .appendLiteral('-') .appendValue(MONTH_OF_YEAR, 1, 2, SignStyle.NOT_NEGATIVE) .optionalStart() .appendLiteral('-') .appendValue(DAY_OF_MONTH, 1, 2, SignStyle.NOT_NEGATIVE) .optionalEnd() - .optionalEnd() .toFormatter(Locale.ROOT); private static final DateTimeFormatter HOUR_MINUTE_FORMATTER = new DateTimeFormatterBuilder() @@ -819,7 +880,7 @@ public class DateFormatters { .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) .optionalEnd() .optionalStart() - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .optionalEnd() .optionalStart().appendZoneOrOffsetId().optionalEnd() .optionalStart().appendOffset("+HHmm", "Z").optionalEnd() @@ -840,7 +901,7 @@ public class DateFormatters { .appendValue(MINUTE_OF_HOUR, 1, 2, SignStyle.NOT_NEGATIVE) .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter ORDINAL_DATE_FORMATTER = new DateTimeFormatterBuilder() @@ -875,7 +936,7 @@ public class DateFormatters { private static final DateTimeFormatter TIME_PREFIX = new DateTimeFormatterBuilder() .append(TIME_NO_MILLIS_FORMATTER) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter WEEK_DATE_FORMATTER = new DateTimeFormatterBuilder() @@ -929,7 +990,17 @@ public class DateFormatters { .append(HOUR_MINUTE_SECOND_MILLIS_FORMATTER) .toFormatter(Locale.ROOT)); - private static final DateFormatter DATE_HOUR_MINUTE_SECOND_FRACTION = DATE_HOUR_MINUTE_SECOND_MILLIS; + private static final DateFormatter DATE_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter("date_hour_minute_second_fraction", + new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral("T") + .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) + .toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder() + .append(DATE_FORMATTER) + .appendLiteral("T") + .append(HOUR_MINUTE_SECOND_MILLIS_FORMATTER) + .toFormatter(Locale.ROOT)); /* * Returns a formatter that combines a full date, two digit hour of day, @@ -963,7 +1034,7 @@ public class DateFormatters { .optionalStart() .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -1034,6 +1105,9 @@ public class DateFormatters { private static final DateFormatter HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter("hour_minute_second_millis", STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, HOUR_MINUTE_SECOND_MILLIS_FORMATTER); + private static final DateFormatter HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter("hour_minute_second_fraction", + STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, HOUR_MINUTE_SECOND_MILLIS_FORMATTER); + /* * Returns a formatter for a two digit hour of day and two digit minute of * hour. (HH:mm) @@ -1068,7 +1142,7 @@ public class DateFormatters { .optionalStart() .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -1273,7 +1347,7 @@ public class DateFormatters { // ///////////////////////////////////////// - public static DateFormatter forPattern(String input) { + static DateFormatter forPattern(String input) { if (Strings.hasLength(input)) { input = input.trim(); } @@ -1332,7 +1406,7 @@ public static DateFormatter forPattern(String input) { } else if ("hourMinuteSecond".equals(input) || "hour_minute_second".equals(input)) { return HOUR_MINUTE_SECOND; } else if ("hourMinuteSecondFraction".equals(input) || "hour_minute_second_fraction".equals(input)) { - return HOUR_MINUTE_SECOND_MILLIS; + return HOUR_MINUTE_SECOND_FRACTION; } else if ("hourMinuteSecondMillis".equals(input) || "hour_minute_second_millis".equals(input)) { return HOUR_MINUTE_SECOND_MILLIS; } else if ("ordinalDate".equals(input) || "ordinal_date".equals(input)) { @@ -1453,18 +1527,21 @@ static JavaDateFormatter merge(String pattern, List formatters) { assert formatters.size() > 0; List dateTimeFormatters = new ArrayList<>(formatters.size()); + DateTimeFormatterBuilder roundupBuilder = new DateTimeFormatterBuilder(); DateTimeFormatter printer = null; for (DateFormatter formatter : formatters) { assert formatter instanceof JavaDateFormatter; JavaDateFormatter javaDateFormatter = (JavaDateFormatter) formatter; - DateTimeFormatter dateTimeFormatter = javaDateFormatter.getParser(); if (printer == null) { printer = javaDateFormatter.getPrinter(); } - dateTimeFormatters.add(dateTimeFormatter); + dateTimeFormatters.add(javaDateFormatter.getParser()); + roundupBuilder.appendOptional(javaDateFormatter.getRoundupParser()); } + DateTimeFormatter roundUpParser = roundupBuilder.toFormatter(Locale.ROOT); - return new JavaDateFormatter(pattern, printer, dateTimeFormatters.toArray(new DateTimeFormatter[0])); + return new JavaDateFormatter(pattern, printer, builder -> builder.append(roundUpParser), + dateTimeFormatters.toArray(new DateTimeFormatter[0])); } private static final ZonedDateTime EPOCH_ZONED_DATE_TIME = Instant.EPOCH.atZone(ZoneOffset.UTC); diff --git a/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java index 1e997cce23be8..3ba392822ca0c 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java @@ -21,6 +21,7 @@ import org.joda.time.DateTimeZone; +import java.time.Instant; import java.time.ZoneId; import java.util.function.LongSupplier; @@ -32,7 +33,7 @@ public interface DateMathParser { /** * Parse a date math expression without timzeone info and rounding down. */ - default long parse(String text, LongSupplier now) { + default Instant parse(String text, LongSupplier now) { return parse(text, now, false, (ZoneId) null); } @@ -42,7 +43,7 @@ default long parse(String text, LongSupplier now) { // exists for backcompat, do not use! @Deprecated - default long parse(String text, LongSupplier now, boolean roundUp, DateTimeZone tz) { + default Instant parse(String text, LongSupplier now, boolean roundUp, DateTimeZone tz) { return parse(text, now, roundUp, tz == null ? null : ZoneId.of(tz.getID())); } @@ -68,7 +69,7 @@ default long parse(String text, LongSupplier now, boolean roundUp, DateTimeZone * @param now a supplier to retrieve the current date in milliseconds, if needed for additions * @param roundUp should the result be rounded up * @param tz an optional timezone that should be applied before returning the milliseconds since the epoch - * @return the parsed date in milliseconds since the epoch + * @return the parsed date as an Instant since the epoch */ - long parse(String text, LongSupplier now, boolean roundUp, ZoneId tz); + Instant parse(String text, LongSupplier now, boolean roundUp, ZoneId tz); } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java index c46cee881a1a0..e913a69dca776 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java @@ -65,12 +65,16 @@ public static ZoneId dateTimeZoneToZoneId(DateTimeZone timeZone) { return ZoneOffset.UTC; } - String deprecatedId = DEPRECATED_SHORT_TIMEZONES.get(timeZone.getID()); + return of(timeZone.getID()); + } + + public static ZoneId of(String zoneId) { + String deprecatedId = DEPRECATED_SHORT_TIMEZONES.get(zoneId); if (deprecatedId != null) { deprecationLogger.deprecatedAndMaybeLog("timezone", - "Use of short timezone id " + timeZone.getID() + " is deprecated. Use " + deprecatedId + " instead"); + "Use of short timezone id " + zoneId + " is deprecated. Use " + deprecatedId + " instead"); return ZoneId.of(deprecatedId); } - return ZoneId.of(timeZone.getID()); + return ZoneId.of(zoneId).normalized(); } } diff --git a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java index 518957cd2eb9d..c824a7c7e7c35 100644 --- a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java +++ b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.time; +import org.elasticsearch.bootstrap.JavaVersion; + import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; import java.time.format.ResolverStyle; @@ -99,6 +101,10 @@ public TemporalAccessor resolve(Map fieldValues, } fieldValues.put(ChronoField.INSTANT_SECONDS, seconds); fieldValues.put(ChronoField.NANO_OF_SECOND, nanos); + // if there is already a milli of second, we need to overwrite it + if (fieldValues.containsKey(ChronoField.MILLI_OF_SECOND)) { + fieldValues.put(ChronoField.MILLI_OF_SECOND, nanos / 1_000_000); + } return null; } }; @@ -106,7 +112,8 @@ public TemporalAccessor resolve(Map fieldValues, private static final EpochField NANOS_OF_MILLI = new EpochField(ChronoUnit.NANOS, ChronoUnit.MILLIS, ValueRange.of(0, 999_999)) { @Override public boolean isSupportedBy(TemporalAccessor temporal) { - return temporal.isSupported(ChronoField.NANO_OF_SECOND) && temporal.getLong(ChronoField.NANO_OF_SECOND) % 1_000_000 != 0; + return temporal.isSupported(ChronoField.INSTANT_SECONDS) && temporal.isSupported(ChronoField.NANO_OF_SECOND) + && temporal.getLong(ChronoField.NANO_OF_SECOND) % 1_000_000 != 0; } @Override public long getFrom(TemporalAccessor temporal) { @@ -153,10 +160,23 @@ public long getFrom(TemporalAccessor temporal) { .toFormatter(Locale.ROOT); static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter("epoch_second", SECONDS_FORMATTER3, + builder -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), SECONDS_FORMATTER1, SECONDS_FORMATTER2, SECONDS_FORMATTER3); - static final DateFormatter MILLIS_FORMATTER = new JavaDateFormatter("epoch_millis", MILLISECONDS_FORMATTER3, - MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2, MILLISECONDS_FORMATTER3); + static final DateFormatter MILLIS_FORMATTER = getEpochMillisFormatter(); + + private static DateFormatter getEpochMillisFormatter() { + // the third formatter fails under java 8 as a printer, so fall back to this one + final DateTimeFormatter printer; + if (JavaVersion.current().getVersion().get(0) == 8) { + printer = MILLISECONDS_FORMATTER1; + } else { + printer = MILLISECONDS_FORMATTER3; + } + return new JavaDateFormatter("epoch_millis", printer, + builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), + MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2, MILLISECONDS_FORMATTER3); + } private abstract static class EpochField implements TemporalField { diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index 0fce14b764ef1..bcdf9cbdcf674 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -24,6 +24,7 @@ import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; +import java.time.format.DateTimeParseException; import java.time.temporal.ChronoField; import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalField; @@ -32,6 +33,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.function.Consumer; class JavaDateFormatter implements DateFormatter { @@ -43,14 +45,27 @@ class JavaDateFormatter implements DateFormatter { ROUND_UP_BASE_FIELDS.put(ChronoField.HOUR_OF_DAY, 23L); ROUND_UP_BASE_FIELDS.put(ChronoField.MINUTE_OF_HOUR, 59L); ROUND_UP_BASE_FIELDS.put(ChronoField.SECOND_OF_MINUTE, 59L); - ROUND_UP_BASE_FIELDS.put(ChronoField.MILLI_OF_SECOND, 999L); + ROUND_UP_BASE_FIELDS.put(ChronoField.NANO_OF_SECOND, 999_999_999L); } private final String format; private final DateTimeFormatter printer; private final DateTimeFormatter parser; + private final DateTimeFormatter roundupParser; + + private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter roundupParser, DateTimeFormatter parser) { + this.format = format; + this.printer = printer; + this.roundupParser = roundupParser; + this.parser = parser; + } JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { + this(format, printer, builder -> ROUND_UP_BASE_FIELDS.forEach(builder::parseDefaulting), parsers); + } + + JavaDateFormatter(String format, DateTimeFormatter printer, Consumer roundupParserConsumer, + DateTimeFormatter... parsers) { if (printer == null) { throw new IllegalArgumentException("printer may not be null"); } @@ -62,6 +77,8 @@ class JavaDateFormatter implements DateFormatter { if (distinctLocales > 1) { throw new IllegalArgumentException("formatters must have the same locale"); } + this.printer = printer; + this.format = format; if (parsers.length == 0) { this.parser = printer; } else if (parsers.length == 1) { @@ -73,8 +90,21 @@ class JavaDateFormatter implements DateFormatter { } this.parser = builder.toFormatter(Locale.ROOT); } - this.format = format; - this.printer = printer; + + DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); + if (format.contains("||") == false) { + builder.append(this.parser); + } + roundupParserConsumer.accept(builder); + DateTimeFormatter roundupFormatter = builder.toFormatter(parser.getLocale()); + if (printer.getZone() != null) { + roundupFormatter = roundupFormatter.withZone(printer.getZone()); + } + this.roundupParser = roundupFormatter; + } + + DateTimeFormatter getRoundupParser() { + return roundupParser; } DateTimeFormatter getParser() { @@ -90,7 +120,12 @@ public TemporalAccessor parse(String input) { if (Strings.isNullOrEmpty(input)) { throw new IllegalArgumentException("cannot parse empty date"); } - return parser.parse(input); + + try { + return parser.parse(input); + } catch (DateTimeParseException e) { + throw new IllegalArgumentException("failed to parse date field [" + input + "] with format [" + format + "]", e); + } } @Override @@ -100,7 +135,7 @@ public DateFormatter withZone(ZoneId zoneId) { return this; } - return new JavaDateFormatter(format, printer.withZone(zoneId), parser.withZone(zoneId)); + return new JavaDateFormatter(format, printer.withZone(zoneId), roundupParser.withZone(zoneId), parser.withZone(zoneId)); } @Override @@ -110,7 +145,7 @@ public DateFormatter withLocale(Locale locale) { return this; } - return new JavaDateFormatter(format, printer.withLocale(locale), parser.withLocale(locale)); + return new JavaDateFormatter(format, printer.withLocale(locale), roundupParser.withLocale(locale), parser.withLocale(locale)); } @Override @@ -123,12 +158,6 @@ public String pattern() { return format; } - JavaDateFormatter parseDefaulting(Map fields) { - final DateTimeFormatterBuilder parseDefaultingBuilder = new DateTimeFormatterBuilder().append(printer); - fields.forEach(parseDefaultingBuilder::parseDefaulting); - return new JavaDateFormatter(format, parseDefaultingBuilder.toFormatter(Locale.ROOT)); - } - @Override public Locale locale() { return this.printer.getLocale(); @@ -141,7 +170,7 @@ public ZoneId zone() { @Override public DateMathParser toDateMathParser() { - return new JavaDateMathParser(this, this.parseDefaulting(ROUND_UP_BASE_FIELDS)); + return new JavaDateMathParser(format, parser, roundupParser); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java index 2d3fb7e176e89..9ee390ba391a7 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java @@ -20,14 +20,16 @@ package org.elasticsearch.common.time; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; -import java.time.DateTimeException; import java.time.DayOfWeek; import java.time.Instant; import java.time.LocalTime; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; import java.time.temporal.ChronoField; import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalAdjusters; @@ -44,24 +46,25 @@ */ public class JavaDateMathParser implements DateMathParser { + private final DateTimeFormatter formatter; + private final DateTimeFormatter roundUpFormatter; + private final String format; - - private final DateFormatter formatter; - private final DateFormatter roundUpFormatter; - - public JavaDateMathParser(DateFormatter formatter, DateFormatter roundUpFormatter) { + JavaDateMathParser(String format, DateTimeFormatter formatter, DateTimeFormatter roundUpFormatter) { + this.format = format; Objects.requireNonNull(formatter); this.formatter = formatter; this.roundUpFormatter = roundUpFormatter; } @Override - public long parse(String text, LongSupplier now, boolean roundUp, ZoneId timeZone) { - long time; + public Instant parse(String text, LongSupplier now, boolean roundUp, ZoneId timeZone) { + Instant time; String mathString; if (text.startsWith("now")) { try { - time = now.getAsLong(); + // TODO only millisecond granularity here! + time = Instant.ofEpochMilli(now.getAsLong()); } catch (Exception e) { throw new ElasticsearchParseException("could not read the current timestamp", e); } @@ -78,12 +81,12 @@ public long parse(String text, LongSupplier now, boolean roundUp, ZoneId timeZon return parseMath(mathString, time, roundUp, timeZone); } - private long parseMath(final String mathString, final long time, final boolean roundUp, + private Instant parseMath(final String mathString, final Instant time, final boolean roundUp, ZoneId timeZone) throws ElasticsearchParseException { if (timeZone == null) { timeZone = ZoneOffset.UTC; } - ZonedDateTime dateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), timeZone); + ZonedDateTime dateTime = ZonedDateTime.ofInstant(time, timeZone); for (int i = 0; i < mathString.length(); ) { char c = mathString.charAt(i++); final boolean round; @@ -204,14 +207,18 @@ private long parseMath(final String mathString, final long time, final boolean r dateTime = dateTime.minus(1, ChronoField.MILLI_OF_SECOND.getBaseUnit()); } } - return dateTime.toInstant().toEpochMilli(); + return dateTime.toInstant(); } - private long parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNoTime) { - DateFormatter formatter = roundUpIfNoTime ? this.roundUpFormatter : this.formatter; + private Instant parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNoTime) { + if (Strings.isNullOrEmpty(value)) { + throw new ElasticsearchParseException("cannot parse empty date"); + } + + DateTimeFormatter formatter = roundUpIfNoTime ? this.roundUpFormatter : this.formatter; try { if (timeZone == null) { - return DateFormatters.toZonedDateTime(formatter.parse(value)).toInstant().toEpochMilli(); + return DateFormatters.toZonedDateTime(formatter.parse(value)).toInstant(); } else { TemporalAccessor accessor = formatter.parse(value); ZoneId zoneId = TemporalQueries.zone().queryFrom(accessor); @@ -219,10 +226,11 @@ private long parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNoTim timeZone = zoneId; } - return DateFormatters.toZonedDateTime(accessor).withZoneSameLocal(timeZone).toInstant().toEpochMilli(); + return DateFormatters.toZonedDateTime(accessor).withZoneSameLocal(timeZone).toInstant(); } - } catch (IllegalArgumentException | DateTimeException e) { - throw new ElasticsearchParseException("failed to parse date field [{}]: [{}]", e, value, e.getMessage()); + } catch (DateTimeParseException e) { + throw new ElasticsearchParseException("failed to parse date field [{}] with format [{}]: [{}]", + e, value, format, e.getMessage()); } } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java index f32ba715a8068..3f731b73dc870 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java @@ -22,7 +22,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.script.JodaCompatibleZonedDateTime; @@ -65,9 +64,9 @@ public class XContentElasticsearchExtension implements XContentBuilderExtension { public static final DateTimeFormatter DEFAULT_DATE_PRINTER = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC); - public static final DateFormatter DEFAULT_FORMATTER = DateFormatters.forPattern("strict_date_optional_time_nanos"); - public static final DateFormatter LOCAL_TIME_FORMATTER = DateFormatters.forPattern("HH:mm:ss.SSS"); - public static final DateFormatter OFFSET_TIME_FORMATTER = DateFormatters.forPattern("HH:mm:ss.SSSZZZZZ"); + public static final DateFormatter DEFAULT_FORMATTER = DateFormatter.forPattern("strict_date_optional_time_nanos"); + public static final DateFormatter LOCAL_TIME_FORMATTER = DateFormatter.forPattern("HH:mm:ss.SSS"); + public static final DateFormatter OFFSET_TIME_FORMATTER = DateFormatter.forPattern("HH:mm:ss.SSSZZZZZ"); @Override public Map, XContentBuilder.Writer> getXContentWriters() { diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 2d236b08f79de..2f676eb846770 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -59,6 +59,7 @@ import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.node.Node; import java.io.Closeable; import java.io.IOException; @@ -309,6 +310,11 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce if (DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings)) { ensureAtomicMoveSupported(nodePaths); } + + if (DiscoveryNode.isDataNode(settings) == false) { + ensureNoShardData(nodePaths); + } + success = true; } finally { if (success == false) { @@ -1030,6 +1036,38 @@ private static void ensureAtomicMoveSupported(final NodePath[] nodePaths) throws } } + private void ensureNoShardData(final NodePath[] nodePaths) throws IOException { + List shardDataPaths = new ArrayList<>(); + for (NodePath nodePath : nodePaths) { + Path indicesPath = nodePath.indicesPath; + if (Files.isDirectory(indicesPath)) { + try (DirectoryStream indexStream = Files.newDirectoryStream(indicesPath)) { + for (Path indexPath : indexStream) { + if (Files.isDirectory(indexPath)) { + try (Stream shardStream = Files.list(indexPath)) { + shardStream.filter(this::isShardPath) + .map(Path::toAbsolutePath) + .forEach(shardDataPaths::add); + } + } + } + } + } + } + + if (shardDataPaths.isEmpty() == false) { + throw new IllegalStateException("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data: " + + shardDataPaths); + } + } + + private boolean isShardPath(Path path) { + return Files.isDirectory(path) + && path.getFileName().toString().chars().allMatch(Character::isDigit); + } + /** * Resolve the custom path for a index's shard. * Uses the {@code IndexMetaData.SETTING_DATA_PATH} setting to determine @@ -1140,3 +1178,4 @@ private static void tryWriteTempFile(Path path) throws IOException { } } } + diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index 92f305cb67a1f..b79a5e77309b9 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -278,16 +278,6 @@ protected void onServerException(HttpServerChannel channel, Exception e) { logger.error(new ParameterizedMessage("exception from http server channel caught on transport layer [channel={}]", channel), e); } - /** - * Exception handler for exceptions that are not associated with a specific channel. - * - * @param exception the exception - */ - protected void onNonChannelException(Exception exception) { - String threadName = Thread.currentThread().getName(); - logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", threadName), exception); - } - protected void serverAcceptedChannel(HttpChannel httpChannel) { boolean addedOnThisCall = httpChannels.add(httpChannel); assert addedOnThisCall : "Channel should only be added to http channel set once"; diff --git a/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java index 0ce8edcf3b662..de345a39fd6d9 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java @@ -29,8 +29,6 @@ public interface HttpServerTransport extends LifecycleComponent { String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker"; - String HTTP_SERVER_ACCEPTOR_THREAD_NAME_PREFIX = "http_server_acceptor"; - BoundTransportAddress boundAddress(); HttpInfo info(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index c926a5a47191e..5c09708b62cae 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -30,6 +30,8 @@ import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; +import org.elasticsearch.Assertions; +import org.elasticsearch.Version; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.core.internal.io.IOUtils; @@ -98,7 +100,25 @@ public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats indexWriterLock = obtainLock ? directory.obtainLock(IndexWriter.WRITE_LOCK_NAME) : null; this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory); this.translogStats = translogStats == null ? new TranslogStats(0, 0, 0, 0, 0) : translogStats; - this.seqNoStats = seqNoStats == null ? buildSeqNoStats(lastCommittedSegmentInfos) : seqNoStats; + if (seqNoStats == null) { + seqNoStats = buildSeqNoStats(lastCommittedSegmentInfos); + // During a peer-recovery the global checkpoint is not known and up to date when the engine + // is created, so we only check the max seq no / global checkpoint coherency when the global + // checkpoint is different from the unassigned sequence number value. + // In addition to that we only execute the check if the index the engine belongs to has been + // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction + // that guarantee that all operations have been flushed to Lucene. + final long globalCheckpoint = engineConfig.getGlobalCheckpointSupplier().getAsLong(); + if (globalCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO + && engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_7_0)) { + if (seqNoStats.getMaxSeqNo() != globalCheckpoint) { + assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), globalCheckpoint); + throw new IllegalStateException("Maximum sequence number [" + seqNoStats.getMaxSeqNo() + + "] from last commit does not match global checkpoint [" + globalCheckpoint + "]"); + } + } + } + this.seqNoStats = seqNoStats; this.indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, directory); reader = open(indexCommit); reader = wrapReader(reader, readerWrapperFunction); @@ -116,6 +136,12 @@ public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats } } + protected void assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) { + if (Assertions.ENABLED) { + assert false : "max seq. no. [" + maxSeqNo + "] does not match [" + globalCheckpoint + "]"; + } + } + protected final DirectoryReader wrapReader(DirectoryReader reader, Function readerWrapperFunction) throws IOException { reader = ElasticsearchDirectoryReader.wrap(reader, engineConfig.getShardId()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java index 69b6a6e04a936..7a5bd97770297 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.ObjectArrayList; - import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; @@ -41,9 +40,9 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Base64; import java.util.List; import java.util.Map; @@ -108,7 +107,7 @@ public String typeName() { } @Override - public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { return DocValueFormat.BINARY; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 9e0b9f62acbe7..caf8baac24da1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -40,9 +40,9 @@ import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -190,7 +190,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { } @Override - public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom formats"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 1e17aab31605b..0dcf52d5e54f2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -33,13 +33,15 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateMathParser; -import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -49,18 +51,17 @@ import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; -import static org.elasticsearch.index.mapper.TypeParsers.parseDateTimeFormatter; - -/** A {@link FieldMapper} for ip addresses. */ +/** A {@link FieldMapper} for dates. */ public class DateFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "date"; @@ -73,8 +74,8 @@ public static class Defaults { public static class Builder extends FieldMapper.Builder { private Boolean ignoreMalformed; + private Explicit format = new Explicit<>(DEFAULT_DATE_TIME_FORMATTER.pattern(), false); private Locale locale; - private boolean dateTimeFormatterSet = false; public Builder(String name) { super(name, new DateFieldType(), new DateFieldType()); @@ -102,27 +103,37 @@ protected Explicit ignoreMalformed(BuilderContext context) { return Defaults.IGNORE_MALFORMED; } - /** Whether an explicit format for this date field has been set already. */ - public boolean isDateTimeFormatterSet() { - return dateTimeFormatterSet; + public Builder locale(Locale locale) { + this.locale = locale; + return this; + } + + public Locale locale() { + return locale; } - public Builder dateTimeFormatter(DateFormatter dateTimeFormatter) { - fieldType().setDateTimeFormatter(dateTimeFormatter); - dateTimeFormatterSet = true; + public String format() { + return format.value(); + } + + public Builder format(String format) { + this.format = new Explicit<>(format, true); return this; } - public void locale(Locale locale) { - this.locale = locale; + public boolean isFormatterSet() { + return format.explicit(); } @Override protected void setupFieldType(BuilderContext context) { super.setupFieldType(context); + String pattern = this.format.value(); DateFormatter dateTimeFormatter = fieldType().dateTimeFormatter; - if (!locale.equals(dateTimeFormatter.locale())) { - fieldType().setDateTimeFormatter(dateTimeFormatter.withLocale(locale)); + + boolean hasPatternChanged = Strings.hasLength(pattern) && Objects.equals(pattern, dateTimeFormatter.pattern()) == false; + if (hasPatternChanged || Objects.equals(builder.locale, dateTimeFormatter.locale()) == false) { + fieldType().setDateTimeFormatter(DateFormatter.forPattern(pattern).withLocale(locale)); } } @@ -160,7 +171,7 @@ public Mapper.Builder parse(String name, Map node, ParserCo builder.locale(LocaleUtils.parse(propNode.toString())); iterator.remove(); } else if (propName.equals("format")) { - builder.dateTimeFormatter(parseDateTimeFormatter(propNode)); + builder.format(propNode.toString()); iterator.remove(); } else if (TypeParsers.parseMultiField(builder, name, parserContext, propName, propNode)) { iterator.remove(); @@ -196,13 +207,12 @@ public MappedFieldType clone() { public boolean equals(Object o) { if (!super.equals(o)) return false; DateFieldType that = (DateFieldType) o; - return Objects.equals(dateTimeFormatter.pattern(), that.dateTimeFormatter.pattern()) && - Objects.equals(dateTimeFormatter.locale(), that.dateTimeFormatter.locale()); + return Objects.equals(dateTimeFormatter, that.dateTimeFormatter); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), dateTimeFormatter.pattern(), dateTimeFormatter.locale()); + return Objects.hash(super.hashCode(), dateTimeFormatter); } @Override @@ -214,10 +224,10 @@ public String typeName() { public void checkCompatibility(MappedFieldType fieldType, List conflicts) { super.checkCompatibility(fieldType, conflicts); DateFieldType other = (DateFieldType) fieldType; - if (Objects.equals(dateTimeFormatter().pattern(), other.dateTimeFormatter().pattern()) == false) { + if (Objects.equals(dateTimeFormatter.pattern(), other.dateTimeFormatter.pattern()) == false) { conflicts.add("mapper [" + name() + "] has different [format] values"); } - if (Objects.equals(dateTimeFormatter().locale(), other.dateTimeFormatter().locale()) == false) { + if (Objects.equals(dateTimeFormatter.locale(), other.dateTimeFormatter.locale()) == false) { conflicts.add("mapper [" + name() + "] has different [locale] values"); } } @@ -226,9 +236,9 @@ public DateFormatter dateTimeFormatter() { return dateTimeFormatter; } - public void setDateTimeFormatter(DateFormatter dateTimeFormatter) { + void setDateTimeFormatter(DateFormatter formatter) { checkIfFrozen(); - this.dateTimeFormatter = dateTimeFormatter; + this.dateTimeFormatter = formatter; this.dateMathParser = dateTimeFormatter.toDateMathParser(); } @@ -237,7 +247,7 @@ protected DateMathParser dateMathParser() { } long parse(String value) { - return dateTimeFormatter().parseMillis(value); + return DateFormatters.toZonedDateTime(dateTimeFormatter().parse(value)).toInstant().toEpochMilli(); } @Override @@ -260,7 +270,7 @@ public Query termQuery(Object value, @Nullable QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, ShapeRelation relation, - @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser, QueryShardContext context) { + @Nullable ZoneId timeZone, @Nullable DateMathParser forcedDateParser, QueryShardContext context) { failIfNotIndexed(); if (relation == ShapeRelation.DISJOINT) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + @@ -294,8 +304,8 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower return query; } - public long parseToMilliseconds(Object value, boolean roundUp, @Nullable DateTimeZone zone, - @Nullable DateMathParser forcedDateParser, QueryRewriteContext context) { + public long parseToMilliseconds(Object value, boolean roundUp, + @Nullable ZoneId zone, @Nullable DateMathParser forcedDateParser, QueryRewriteContext context) { DateMathParser dateParser = dateMathParser(); if (forcedDateParser != null) { dateParser = forcedDateParser; @@ -307,13 +317,13 @@ public long parseToMilliseconds(Object value, boolean roundUp, @Nullable DateTim } else { strValue = value.toString(); } - return dateParser.parse(strValue, context::nowInMillis, roundUp, DateUtils.dateTimeZoneToZoneId(zone)); + return dateParser.parse(strValue, context::nowInMillis, roundUp, zone).toEpochMilli(); } @Override - public Relation isFieldWithinQuery(IndexReader reader, Object from, Object to, boolean includeLower, boolean includeUpper, - DateTimeZone timeZone, DateMathParser dateParser, - QueryRewriteContext context) throws IOException { + public Relation isFieldWithinQuery(IndexReader reader, + Object from, Object to, boolean includeLower, boolean includeUpper, + ZoneId timeZone, DateMathParser dateParser, QueryRewriteContext context) throws IOException { if (dateParser == null) { dateParser = this.dateMathParser; } @@ -376,13 +386,13 @@ public Object valueForDisplay(Object value) { } @Override - public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { DateFormatter dateTimeFormatter = this.dateTimeFormatter; if (format != null) { - dateTimeFormatter = DateFormatter.forPattern(format); + dateTimeFormatter = DateFormatter.forPattern(format).withLocale(dateTimeFormatter.locale()); } if (timeZone == null) { - timeZone = DateTimeZone.UTC; + timeZone = ZoneOffset.UTC; } return new DocValueFormat.DateTime(dateTimeFormatter, timeZone); } @@ -442,7 +452,7 @@ protected void parseCreateField(ParseContext context, List field long timestamp; try { timestamp = fieldType().parse(dateAsString); - } catch (IllegalArgumentException e) { + } catch (IllegalArgumentException | ElasticsearchParseException e) { if (ignoreMalformed.value()) { context.addIgnoredField(fieldType.name()); return; @@ -489,8 +499,9 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, || fieldType().dateTimeFormatter().pattern().equals(DEFAULT_DATE_TIME_FORMATTER.pattern()) == false) { builder.field("format", fieldType().dateTimeFormatter().pattern()); } + if (includeDefaults - || fieldType().dateTimeFormatter().locale() != Locale.ROOT) { + || fieldType().dateTimeFormatter().locale().equals(DEFAULT_DATE_TIME_FORMATTER.locale()) == false) { builder.field("locale", fieldType().dateTimeFormatter().locale()); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 54e59691f80d5..fe2bd6e9eed59 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -21,6 +21,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -35,6 +36,7 @@ import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType; import java.io.IOException; +import java.time.format.DateTimeParseException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -672,7 +674,7 @@ private static Mapper.Builder createBuilderFromFieldType(final ParseContext private static Mapper.Builder newDateBuilder(String name, DateFormatter dateTimeFormatter, Version indexCreated) { DateFieldMapper.Builder builder = new DateFieldMapper.Builder(name); if (dateTimeFormatter != null) { - builder.dateTimeFormatter(dateTimeFormatter); + builder.format(dateTimeFormatter.pattern()).locale(dateTimeFormatter.locale()); } return builder; } @@ -717,8 +719,8 @@ private static Mapper.Builder createBuilderFromDynamicValue(final ParseCont // `epoch_millis` or `YYYY` for (DateFormatter dateTimeFormatter : context.root().dynamicDateTimeFormatters()) { try { - dateTimeFormatter.parseMillis(text); - } catch (IllegalArgumentException e) { + dateTimeFormatter.parse(text); + } catch (ElasticsearchParseException | DateTimeParseException | IllegalArgumentException e) { // failure to parse this, continue continue; } @@ -728,8 +730,8 @@ private static Mapper.Builder createBuilderFromDynamicValue(final ParseCont } if (builder instanceof DateFieldMapper.Builder) { DateFieldMapper.Builder dateBuilder = (DateFieldMapper.Builder) builder; - if (dateBuilder.isDateTimeFormatterSet() == false) { - dateBuilder.dateTimeFormatter(dateTimeFormatter); + if (dateBuilder.isFormatterSet() == false) { + dateBuilder.format(dateTimeFormatter.pattern()).locale(dateTimeFormatter.locale()); } } return builder; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 79b2b0c4c67d1..32c44fd5f55a0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -73,21 +73,14 @@ public static class Defaults { } } - public static class Builder extends MetadataFieldMapper.Builder { + private static class Builder extends MetadataFieldMapper.Builder { private boolean enabled = Defaults.ENABLED; - public Builder(MappedFieldType existing) { + private Builder(MappedFieldType existing) { super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); } - @Override - @Deprecated - public Builder index(boolean index) { - enabled(index); - return super.index(index); - } - - public Builder enabled(boolean enabled) { + private Builder enabled(boolean enabled) { this.enabled = enabled; return this; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index a8ef46b93060e..2b52e42ffe558 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -44,10 +44,10 @@ import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.net.InetAddress; +import java.time.ZoneId; import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -303,7 +303,7 @@ public Object valueForDisplay(Object value) { } @Override - public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom formats"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index f785e01125f69..5ef689709400d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -50,9 +50,9 @@ import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.List; import java.util.Objects; @@ -335,10 +335,10 @@ public Query termsQuery(List values, @Nullable QueryShardContext context) { * @param relation the relation, nulls should be interpreted like INTERSECTS */ public Query rangeQuery( - Object lowerTerm, Object upperTerm, - boolean includeLower, boolean includeUpper, - ShapeRelation relation, DateTimeZone timeZone, DateMathParser parser, - QueryShardContext context) { + Object lowerTerm, Object upperTerm, + boolean includeLower, boolean includeUpper, + ShapeRelation relation, ZoneId timeZone, DateMathParser parser, + QueryShardContext context) { throw new IllegalArgumentException("Field [" + name + "] of type [" + typeName() + "] does not support range queries"); } @@ -413,7 +413,7 @@ public Relation isFieldWithinQuery( IndexReader reader, Object from, Object to, boolean includeLower, boolean includeUpper, - DateTimeZone timeZone, DateMathParser dateMathParser, QueryRewriteContext context) throws IOException { + ZoneId timeZone, DateMathParser dateMathParser, QueryRewriteContext context) throws IOException { return Relation.INTERSECTS; } @@ -448,7 +448,7 @@ public void setEagerGlobalOrdinals(boolean eagerGlobalOrdinals) { /** Return a {@link DocValueFormat} that can be used to display and parse * values as returned by the fielddata API. * The default implementation returns a {@link DocValueFormat#RAW}. */ - public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom formats"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 8d9a688776548..06e12ca8b5e4c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -53,9 +53,9 @@ import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; @@ -961,7 +961,7 @@ public Object valueForDisplay(Object value) { } @Override - public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { if (timeZone != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones"); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index d93c909ff8445..e5ba55de7bfd0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -42,6 +42,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.network.InetAddresses; @@ -49,19 +50,18 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateMathParser; -import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.index.query.QueryShardContext; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.HashSet; import java.util.Iterator; @@ -71,7 +71,6 @@ import java.util.Objects; import java.util.Set; -import static org.elasticsearch.index.mapper.TypeParsers.parseDateTimeFormatter; import static org.elasticsearch.index.query.RangeQueryBuilder.GTE_FIELD; import static org.elasticsearch.index.query.RangeQueryBuilder.GT_FIELD; import static org.elasticsearch.index.query.RangeQueryBuilder.LTE_FIELD; @@ -92,12 +91,12 @@ public static class Defaults { public static class Builder extends FieldMapper.Builder { private Boolean coerce; - private Locale locale; + private Locale locale = Locale.ROOT; + private String pattern; public Builder(String name, RangeType type) { super(name, new RangeFieldType(type), new RangeFieldType(type)); builder = this; - locale = Locale.ROOT; } @Override @@ -128,8 +127,8 @@ protected Explicit coerce(BuilderContext context) { return Defaults.COERCE; } - public Builder dateTimeFormatter(DateFormatter dateTimeFormatter) { - fieldType().setDateTimeFormatter(dateTimeFormatter); + public Builder format(String format) { + this.pattern = format; return this; } @@ -145,12 +144,15 @@ public void locale(Locale locale) { @Override protected void setupFieldType(BuilderContext context) { super.setupFieldType(context); - DateFormatter dateTimeFormatter = fieldType().dateTimeFormatter; + DateFormatter formatter = fieldType().dateTimeFormatter; if (fieldType().rangeType == RangeType.DATE) { - if (!locale.equals(dateTimeFormatter.locale())) { - fieldType().setDateTimeFormatter(dateTimeFormatter.withLocale(locale)); + boolean hasPatternChanged = Strings.hasLength(builder.pattern) && + Objects.equals(builder.pattern, formatter.pattern()) == false; + + if (hasPatternChanged || Objects.equals(builder.locale, formatter.locale()) == false) { + fieldType().setDateTimeFormatter(DateFormatter.forPattern(pattern).withLocale(locale)); } - } else if (dateTimeFormatter != null) { + } else if (pattern != null) { throw new IllegalArgumentException("field [" + name() + "] of type [" + fieldType().rangeType + "] should not define a dateTimeFormatter unless it is a " + RangeType.DATE + " type"); } @@ -190,7 +192,7 @@ public Mapper.Builder parse(String name, Map node, builder.locale(LocaleUtils.parse(propNode.toString())); iterator.remove(); } else if (propName.equals("format")) { - builder.dateTimeFormatter(parseDateTimeFormatter(propNode)); + builder.format(propNode.toString()); iterator.remove(); } else if (TypeParsers.parseMultiField(builder, name, parserContext, propName, propNode)) { iterator.remove(); @@ -219,8 +221,8 @@ public static final class RangeFieldType extends MappedFieldType { RangeFieldType(RangeFieldType other) { super(other); this.rangeType = other.rangeType; - if (other.dateTimeFormatter() != null) { - setDateTimeFormatter(other.dateTimeFormatter); + if (other.rangeType == RangeType.DATE && other.dateTimeFormatter() != null) { + setDateTimeFormatter(other.dateTimeFormatter()); } } @@ -235,15 +237,13 @@ public boolean equals(Object o) { RangeFieldType that = (RangeFieldType) o; return Objects.equals(rangeType, that.rangeType) && (rangeType == RangeType.DATE) ? - Objects.equals(dateTimeFormatter.pattern(), that.dateTimeFormatter.pattern()) - && Objects.equals(dateTimeFormatter.locale(), that.dateTimeFormatter.locale()) + Objects.equals(dateTimeFormatter, that.dateTimeFormatter) : dateTimeFormatter == null && that.dateTimeFormatter == null; } @Override public int hashCode() { - return (dateTimeFormatter == null) ? Objects.hash(super.hashCode(), rangeType) - : Objects.hash(super.hashCode(), rangeType, dateTimeFormatter.pattern(), dateTimeFormatter.locale()); + return Objects.hash(super.hashCode(), rangeType, dateTimeFormatter); } @Override @@ -285,7 +285,7 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, - ShapeRelation relation, DateTimeZone timeZone, DateMathParser parser, QueryShardContext context) { + ShapeRelation relation, ZoneId timeZone, DateMathParser parser, QueryShardContext context) { failIfNotIndexed(); if (parser == null) { parser = dateMathParser(); @@ -543,7 +543,8 @@ public Field getRangeField(String name, Range r) { return new LongRange(name, new long[] {((Number)r.from).longValue()}, new long[] {((Number)r.to).longValue()}); } private Number parse(DateMathParser dateMathParser, String dateStr) { - return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");}); + return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");}) + .toEpochMilli(); } @Override public Number parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) @@ -586,18 +587,18 @@ public Query dvRangeQuery(String field, QueryType queryType, Object from, Object @Override public Query rangeQuery(String field, boolean hasDocValues, Object lowerTerm, Object upperTerm, boolean includeLower, - boolean includeUpper, ShapeRelation relation, @Nullable DateTimeZone timeZone, + boolean includeUpper, ShapeRelation relation, @Nullable ZoneId timeZone, @Nullable DateMathParser parser, QueryShardContext context) { - DateTimeZone zone = (timeZone == null) ? DateTimeZone.UTC : timeZone; - ZoneId zoneId = DateUtils.dateTimeZoneToZoneId(zone); + ZoneId zone = (timeZone == null) ? ZoneOffset.UTC : timeZone; + DateMathParser dateMathParser = (parser == null) ? DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser() : parser; Long low = lowerTerm == null ? Long.MIN_VALUE : dateMathParser.parse(lowerTerm instanceof BytesRef ? ((BytesRef) lowerTerm).utf8ToString() : lowerTerm.toString(), - context::nowInMillis, false, zoneId); + context::nowInMillis, false, zone).toEpochMilli(); Long high = upperTerm == null ? Long.MAX_VALUE : dateMathParser.parse(upperTerm instanceof BytesRef ? ((BytesRef) upperTerm).utf8ToString() : upperTerm.toString(), - context::nowInMillis, false, zoneId); + context::nowInMillis, false, zone).toEpochMilli(); return super.rangeQuery(field, hasDocValues, low, high, includeLower, includeUpper, relation, zone, dateMathParser, context); @@ -910,7 +911,7 @@ public Object parse(Object value, boolean coerce) { return numberType.parse(value, coerce); } public Query rangeQuery(String field, boolean hasDocValues, Object from, Object to, boolean includeFrom, boolean includeTo, - ShapeRelation relation, @Nullable DateTimeZone timeZone, @Nullable DateMathParser dateMathParser, + ShapeRelation relation, @Nullable ZoneId timeZone, @Nullable DateMathParser dateMathParser, QueryShardContext context) { Object lower = from == null ? minValue() : parse(from, false); Object upper = to == null ? maxValue() : parse(to, false); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index f35e6126dbe71..6d2f0fddd86c2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.ToXContent; @@ -46,7 +45,7 @@ public static class Defaults { public static final DateFormatter[] DYNAMIC_DATE_TIME_FORMATTERS = new DateFormatter[]{ DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, - Joda.getStrictStandardDateFormatter() + DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis") }; public static final boolean DATE_DETECTION = true; public static final boolean NUMERIC_DETECTION = false; @@ -55,8 +54,7 @@ public static class Defaults { public static class Builder extends ObjectMapper.Builder { protected Explicit dynamicTemplates = new Explicit<>(new DynamicTemplate[0], false); - protected Explicit dynamicDateTimeFormatters = - new Explicit<>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false); + protected Explicit dynamicDateTimeFormatters = new Explicit<>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false); protected Explicit dateDetection = new Explicit<>(Defaults.DATE_DETECTION, false); protected Explicit numericDetection = new Explicit<>(Defaults.NUMERIC_DETECTION, false); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java index 3d3b160787050..366eb3b36f0fe 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java @@ -23,7 +23,8 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.index.query.QueryShardContext; -import org.joda.time.DateTimeZone; + +import java.time.ZoneId; /** * {@link MappedFieldType} base impl for field types that are neither dates nor ranges. @@ -40,7 +41,7 @@ protected SimpleMappedFieldType(MappedFieldType ref) { @Override public final Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, - ShapeRelation relation, DateTimeZone timeZone, DateMathParser parser, QueryShardContext context) { + ShapeRelation relation, ZoneId timeZone, DateMathParser parser, QueryShardContext context) { if (relation == ShapeRelation.DISJOINT) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support DISJOINT ranges"); @@ -52,7 +53,7 @@ public final Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includ } /** - * Same as {@link #rangeQuery(Object, Object, boolean, boolean, ShapeRelation, DateTimeZone, DateMathParser, QueryShardContext)} + * Same as {@link #rangeQuery(Object, Object, boolean, boolean, ShapeRelation, ZoneId, DateMathParser, QueryShardContext)} * but without the trouble of relations or date-specific options. */ protected Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index d93caf3c4e8d1..8cf66009ea140 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -43,7 +43,6 @@ public class TypeParsers { public static final String INDEX_OPTIONS_POSITIONS = "positions"; public static final String INDEX_OPTIONS_OFFSETS = "offsets"; - private static void parseAnalyzersAndTermVectors(FieldMapper.Builder builder, String name, Map fieldNode, Mapper.TypeParser.ParserContext parserContext) { NamedAnalyzer indexAnalyzer = null; diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index 733875075b051..f5be9650b8d5c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -68,6 +68,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { PARSER.declareInt(InnerHitBuilder::setSize, SearchSourceBuilder.SIZE_FIELD); PARSER.declareBoolean(InnerHitBuilder::setExplain, SearchSourceBuilder.EXPLAIN_FIELD); PARSER.declareBoolean(InnerHitBuilder::setVersion, SearchSourceBuilder.VERSION_FIELD); + PARSER.declareBoolean(InnerHitBuilder::setSeqNoAndPrimaryTerm, SearchSourceBuilder.SEQ_NO_PRIMARY_TERM_FIELD); PARSER.declareBoolean(InnerHitBuilder::setTrackScores, SearchSourceBuilder.TRACK_SCORES_FIELD); PARSER.declareStringArray(InnerHitBuilder::setStoredFieldNames, SearchSourceBuilder.STORED_FIELDS_FIELD); PARSER.declareObjectArray(InnerHitBuilder::setDocValueFields, @@ -117,7 +118,6 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { }, COLLAPSE_FIELD, ObjectParser.ValueType.OBJECT); } - private String name; private boolean ignoreUnmapped; @@ -125,6 +125,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { private int size = 3; private boolean explain; private boolean version; + private boolean seqNoAndPrimaryTerm; private boolean trackScores; private StoredFieldsContext storedFieldsContext; @@ -155,6 +156,11 @@ public InnerHitBuilder(StreamInput in) throws IOException { size = in.readVInt(); explain = in.readBoolean(); version = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_7_0_0)){ + seqNoAndPrimaryTerm = in.readBoolean(); + } else { + seqNoAndPrimaryTerm = false; + } trackScores = in.readBoolean(); storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); if (in.getVersion().before(Version.V_6_4_0)) { @@ -199,6 +205,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(size); out.writeBoolean(explain); out.writeBoolean(version); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeBoolean(seqNoAndPrimaryTerm); + } out.writeBoolean(trackScores); out.writeOptionalWriteable(storedFieldsContext); if (out.getVersion().before(Version.V_6_4_0)) { @@ -299,6 +308,15 @@ public InnerHitBuilder setVersion(boolean version) { return this; } + public boolean isSeqNoAndPrimaryTerm() { + return seqNoAndPrimaryTerm; + } + + public InnerHitBuilder setSeqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; + return this; + } + public boolean isTrackScores() { return trackScores; } @@ -436,6 +454,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(SearchSourceBuilder.FROM_FIELD.getPreferredName(), from); builder.field(SearchSourceBuilder.SIZE_FIELD.getPreferredName(), size); builder.field(SearchSourceBuilder.VERSION_FIELD.getPreferredName(), version); + builder.field(SearchSourceBuilder.SEQ_NO_PRIMARY_TERM_FIELD.getPreferredName(), seqNoAndPrimaryTerm); builder.field(SearchSourceBuilder.EXPLAIN_FIELD.getPreferredName(), explain); builder.field(SearchSourceBuilder.TRACK_SCORES_FIELD.getPreferredName(), trackScores); if (fetchSourceContext != null) { @@ -494,6 +513,7 @@ public boolean equals(Object o) { Objects.equals(size, that.size) && Objects.equals(explain, that.explain) && Objects.equals(version, that.version) && + Objects.equals(seqNoAndPrimaryTerm, that.seqNoAndPrimaryTerm) && Objects.equals(trackScores, that.trackScores) && Objects.equals(storedFieldsContext, that.storedFieldsContext) && Objects.equals(docValueFields, that.docValueFields) && @@ -506,7 +526,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(name, ignoreUnmapped, from, size, explain, version, trackScores, + return Objects.hash(name, ignoreUnmapped, from, size, explain, version, seqNoAndPrimaryTerm, trackScores, storedFieldsContext, docValueFields, scriptFields, fetchSourceContext, sorts, highlightBuilder, innerCollapseBuilder); } diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java index 1fe781f38ce27..9e6a766aed891 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java @@ -78,6 +78,7 @@ protected void setupInnerHitsContext(QueryShardContext queryShardContext, innerHitsContext.size(innerHitBuilder.getSize()); innerHitsContext.explain(innerHitBuilder.isExplain()); innerHitsContext.version(innerHitBuilder.isVersion()); + innerHitsContext.seqNoAndPrimaryTerm(innerHitBuilder.isSeqNoAndPrimaryTerm()); innerHitsContext.trackScores(innerHitBuilder.isTrackScores()); if (innerHitBuilder.getStoredFieldsContext() != null) { innerHitsContext.storedFieldsContext(innerHitBuilder.getStoredFieldsContext()); diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java index 7f42eb137190d..b39f2ab5a91e6 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java @@ -275,6 +275,11 @@ public MatchesIterator matches(String field, LeafReaderContext ctx, int doc) { return null; } + @Override + public int minExtent() { + return 0; + } + @Override public void extractTerms(String field, Set terms) { diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index d2b432e7c7ca1..3c3856e208f04 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -368,6 +368,14 @@ static final class NestedInnerHitSubContext extends InnerHitsContext.InnerHitSub this.childObjectMapper = childObjectMapper; } + @Override + public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + assert seqNoAndPrimaryTerm() == false; + if (seqNoAndPrimaryTerm) { + throw new UnsupportedOperationException("nested documents are not assigned sequence numbers"); + } + } + @Override public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException { Weight innerHitQueryWeight = createInnerHitQueryWeight(); diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index 6ae9055efcefc..363384030a2ac 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -38,9 +38,9 @@ import org.elasticsearch.index.query.support.QueryParsers; import org.elasticsearch.index.search.QueryParserHelper; import org.elasticsearch.index.search.QueryStringQueryParser; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -144,7 +144,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder i private static final ParseField RELATION_FIELD = new ParseField("relation"); private final String fieldName; - private Object from; - private Object to; - - private DateTimeZone timeZone; - + private ZoneId timeZone; private boolean includeLower = DEFAULT_INCLUDE_LOWER; - private boolean includeUpper = DEFAULT_INCLUDE_UPPER; - - private DateFormatter format; - + private String format; private ShapeRelation relation; /** @@ -101,11 +95,8 @@ public RangeQueryBuilder(StreamInput in) throws IOException { to = in.readGenericValue(); includeLower = in.readBoolean(); includeUpper = in.readBoolean(); - timeZone = in.readOptionalTimeZone(); - String formatString = in.readOptionalString(); - if (formatString != null) { - format = DateFormatter.forPattern(formatString); - } + timeZone = in.readOptionalZoneId(); + format = in.readOptionalString(); String relationString = in.readOptionalString(); if (relationString != null) { relation = ShapeRelation.getRelationByName(relationString); @@ -129,12 +120,8 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeGenericValue(this.to); out.writeBoolean(this.includeLower); out.writeBoolean(this.includeUpper); - out.writeOptionalTimeZone(timeZone); - String formatString = null; - if (this.format != null) { - formatString = this.format.pattern(); - } - out.writeOptionalString(formatString); + out.writeOptionalZoneId(timeZone); + out.writeOptionalString(format); String relationString = null; if (this.relation != null) { relationString = this.relation.getRelationName(); @@ -267,7 +254,11 @@ public RangeQueryBuilder timeZone(String timeZone) { if (timeZone == null) { throw new IllegalArgumentException("timezone cannot be null"); } - this.timeZone = DateTimeZone.forID(timeZone); + try { + this.timeZone = ZoneId.of(timeZone); + } catch (DateTimeException e) { + throw new IllegalArgumentException(e); + } return this; } @@ -275,10 +266,10 @@ public RangeQueryBuilder timeZone(String timeZone) { * In case of date field, gets the from/to fields timezone adjustment */ public String timeZone() { - return this.timeZone == null ? null : this.timeZone.getID(); + return this.timeZone == null ? null : this.timeZone.getId(); } - DateTimeZone getDateTimeZone() { // for testing + ZoneId getDateTimeZone() { // for testing return timeZone; } @@ -289,7 +280,9 @@ public RangeQueryBuilder format(String format) { if (format == null) { throw new IllegalArgumentException("format cannot be null"); } - this.format = DateFormatter.forPattern(format); + // this just ensure that the pattern is actually valid, no need to keep it here + DateFormatter.forPattern(format); + this.format = format; return this; } @@ -297,12 +290,12 @@ public RangeQueryBuilder format(String format) { * Gets the format field to parse the from/to fields */ public String format() { - return this.format == null ? null : this.format.pattern(); + return format; } DateMathParser getForceDateParser() { // pkg private for testing - if (this.format != null) { - return this.format.toDateMathParser(); + if (Strings.hasText(format)) { + return DateFormatter.forPattern(this.format).toDateMathParser(); } return null; } @@ -334,10 +327,10 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.field(INCLUDE_LOWER_FIELD.getPreferredName(), includeLower); builder.field(INCLUDE_UPPER_FIELD.getPreferredName(), includeUpper); if (timeZone != null) { - builder.field(TIME_ZONE_FIELD.getPreferredName(), timeZone.getID()); + builder.field(TIME_ZONE_FIELD.getPreferredName(), timeZone.getId()); } - if (format != null) { - builder.field(FORMAT_FIELD.getPreferredName(), format.pattern()); + if (Strings.hasText(format)) { + builder.field(FORMAT_FIELD.getPreferredName(), format); } if (relation != null) { builder.field(RELATION_FIELD.getPreferredName(), relation.getRelationName()); @@ -531,21 +524,17 @@ protected Query doToQuery(QueryShardContext context) throws IOException { @Override protected int doHashCode() { - String timeZoneId = timeZone == null ? null : timeZone.getID(); - String formatString = format == null ? null : format.pattern(); - return Objects.hash(fieldName, from, to, timeZoneId, includeLower, includeUpper, formatString); + return Objects.hash(fieldName, from, to, timeZone, includeLower, includeUpper, format); } @Override protected boolean doEquals(RangeQueryBuilder other) { - String timeZoneId = timeZone == null ? null : timeZone.getID(); - String formatString = format == null ? null : format.pattern(); return Objects.equals(fieldName, other.fieldName) && Objects.equals(from, other.from) && Objects.equals(to, other.to) && - Objects.equals(timeZoneId, other.timeZone()) && + Objects.equals(timeZone, other.timeZone) && Objects.equals(includeLower, other.includeLower) && Objects.equals(includeUpper, other.includeUpper) && - Objects.equals(formatString, other.format()); + Objects.equals(format, other.format); } } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 4974ef9277e9a..dc5354c7e0522 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -54,9 +54,9 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -89,7 +89,7 @@ public class QueryStringQueryParser extends XQueryParser { private Analyzer forceQuoteAnalyzer; private String quoteFieldSuffix; private boolean analyzeWildcard; - private DateTimeZone timeZone; + private ZoneId timeZone; private Fuzziness fuzziness = Fuzziness.AUTO; private int fuzzyMaxExpansions = FuzzyQuery.defaultMaxExpansions; private MappedFieldType currentFieldType; @@ -227,7 +227,7 @@ public void setAnalyzeWildcard(boolean analyzeWildcard) { /** * @param timeZone Time Zone to be applied to any range query related to dates. */ - public void setTimeZone(DateTimeZone timeZone) { + public void setTimeZone(ZoneId timeZone) { this.timeZone = timeZone; } diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 9bc4e4ead1269..0423559aaf5a5 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; @@ -290,16 +289,14 @@ private void reportSuccessWithExistingSyncId(ShardId shardId, listener.onResponse(new ShardsSyncedFlushResult(shardId, existingSyncId, totalShards, results)); } - final IndexShardRoutingTable getShardRoutingTable(ShardId shardId, ClusterState state) { - final IndexRoutingTable indexRoutingTable = state.routingTable().index(shardId.getIndexName()); - if (indexRoutingTable == null) { - IndexMetaData index = state.getMetaData().index(shardId.getIndex()); - if (index != null && index.getState() == IndexMetaData.State.CLOSE) { - throw new IndexClosedException(shardId.getIndex()); - } + final IndexShardRoutingTable getShardRoutingTable(final ShardId shardId, final ClusterState state) { + final IndexMetaData indexMetaData = state.getMetaData().index(shardId.getIndex()); + if (indexMetaData == null) { throw new IndexNotFoundException(shardId.getIndexName()); + } else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { + throw new IndexClosedException(shardId.getIndex()); } - final IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId.id()); + final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(indexMetaData.getIndex()).shard(shardId.id()); if (shardRoutingTable == null) { throw new ShardNotFoundException(shardId); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 3a48702fd552b..bc2196501fb8e 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -431,13 +431,13 @@ public interface RecoveryListener { class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel, - Task task) throws Exception { - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() - )) { - recoveryRef.target().prepareForTranslogOperations(request.isFileBasedRecovery(), request.totalTranslogOps()); + public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel, Task task) { + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { + final ActionListener listener = + new HandledTransportAction.ChannelActionListener<>(channel, Actions.PREPARE_TRANSLOG, request); + recoveryRef.target().prepareForTranslogOperations(request.isFileBasedRecovery(), request.totalTranslogOps(), + ActionListener.wrap(nullVal -> listener.onResponse(TransportResponse.Empty.INSTANCE), listener::onFailure)); } - channel.sendResponse(TransportResponse.Empty.INSTANCE); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java index 605d202cbaf9b..acac7e732b38b 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java @@ -60,9 +60,9 @@ public final class RecoveryResponse extends TransportResponse { RecoveryResponse(StreamInput in) throws IOException { super(in); - phase1FileNames = in.readList(StreamInput::readString); + phase1FileNames = in.readStringList(); phase1FileSizes = in.readList(StreamInput::readVLong); - phase1ExistingFileNames = in.readList(StreamInput::readString); + phase1ExistingFileNames = in.readStringList(); phase1ExistingFileSizes = in.readList(StreamInput::readVLong); phase1TotalSize = in.readVLong(); phase1ExistingTotalSize = in.readVLong(); @@ -76,9 +76,9 @@ public final class RecoveryResponse extends TransportResponse { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeStringList(phase1FileNames); + out.writeStringCollection(phase1FileNames); out.writeCollection(phase1FileSizes, StreamOutput::writeVLong); - out.writeStringList(phase1ExistingFileNames); + out.writeStringCollection(phase1ExistingFileNames); out.writeCollection(phase1ExistingFileSizes, StreamOutput::writeVLong); out.writeVLong(phase1TotalSize); out.writeVLong(phase1ExistingTotalSize); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 34434f50b456f..f360a68b7a83c 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -197,51 +197,51 @@ public void recoverToTarget(ActionListener listener) { assert requiredSeqNoRangeStart >= startingSeqNo : "requiredSeqNoRangeStart [" + requiredSeqNoRangeStart + "] is lower than [" + startingSeqNo + "]"; - final TimeValue prepareEngineTime; - try { - // For a sequence based recovery, the target can keep its local translog - prepareEngineTime = prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, - shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); - } catch (final Exception e) { - throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e); - } + final StepListener prepareEngineStep = new StepListener<>(); + // For a sequence based recovery, the target can keep its local translog + prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, + shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo), prepareEngineStep); + final StepListener sendSnapshotStep = new StepListener<>(); + prepareEngineStep.whenComplete(prepareEngineTime -> { + /* + * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. + * This means that any document indexed into the primary after this will be replicated to this replica as well + * make sure to do this before sampling the max sequence number in the next step, to ensure that we send + * all documents up to maxSeqNo in phase2. + */ + runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId()), + shardId + " initiating tracking of " + request.targetAllocationId(), shard, cancellableThreads, logger); - /* - * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. - * This means that any document indexed into the primary after this will be replicated to this replica as well - * make sure to do this before sampling the max sequence number in the next step, to ensure that we send - * all documents up to maxSeqNo in phase2. - */ - runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId()), - shardId + " initiating tracking of " + request.targetAllocationId(), shard, cancellableThreads, logger); - - final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); - /* - * We need to wait for all operations up to the current max to complete, otherwise we can not guarantee that all - * operations in the required range will be available for replaying from the translog of the source. - */ - cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo)); - - if (logger.isTraceEnabled()) { - logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo); - logger.trace("snapshot translog for recovery; current size is [{}]", - shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); - } + final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); + /* + * We need to wait for all operations up to the current max to complete, otherwise we can not guarantee that all + * operations in the required range will be available for replaying from the translog of the source. + */ + cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo)); + if (logger.isTraceEnabled()) { + logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo); + logger.trace("snapshot translog for recovery; current size is [{}]", + shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); + } + final Translog.Snapshot phase2Snapshot = shard.getHistoryOperations("peer-recovery", startingSeqNo); + resources.add(phase2Snapshot); + // we can release the retention lock here because the snapshot itself will retain the required operations. + retentionLock.close(); + // we have to capture the max_seen_auto_id_timestamp and the max_seq_no_of_updates to make sure that these values + // are at least as high as the corresponding values on the primary when any of these operations were executed on it. + final long maxSeenAutoIdTimestamp = shard.getMaxSeenAutoIdTimestamp(); + final long maxSeqNoOfUpdatesOrDeletes = shard.getMaxSeqNoOfUpdatesOrDeletes(); + phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, phase2Snapshot, maxSeenAutoIdTimestamp, + maxSeqNoOfUpdatesOrDeletes, sendSnapshotStep); + sendSnapshotStep.whenComplete( + r -> IOUtils.close(phase2Snapshot), + e -> { + IOUtils.closeWhileHandlingException(phase2Snapshot); + onFailure.accept(new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e)); + }); + + }, onFailure); - final Translog.Snapshot phase2Snapshot = shard.getHistoryOperations("peer-recovery", startingSeqNo); - resources.add(phase2Snapshot); - // we can release the retention lock here because the snapshot itself will retain the required operations. - IOUtils.close(retentionLock); - // we have to capture the max_seen_auto_id_timestamp and the max_seq_no_of_updates to make sure that these values - // are at least as high as the corresponding values on the primary when any of these operations were executed on it. - final long maxSeenAutoIdTimestamp = shard.getMaxSeenAutoIdTimestamp(); - final long maxSeqNoOfUpdatesOrDeletes = shard.getMaxSeqNoOfUpdatesOrDeletes(); - final StepListener sendSnapshotStep = new StepListener<>(); - phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, phase2Snapshot, maxSeenAutoIdTimestamp, - maxSeqNoOfUpdatesOrDeletes, sendSnapshotStep); - sendSnapshotStep.whenComplete( - r -> IOUtils.close(phase2Snapshot), - e -> onFailure.accept(new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e))); final StepListener finalizeStep = new StepListener<>(); sendSnapshotStep.whenComplete(r -> finalizeRecovery(r.targetLocalCheckpoint, finalizeStep), onFailure); @@ -251,7 +251,7 @@ public void recoverToTarget(ActionListener listener) { final RecoveryResponse response = new RecoveryResponse(sendFileResult.phase1FileNames, sendFileResult.phase1FileSizes, sendFileResult.phase1ExistingFileNames, sendFileResult.phase1ExistingFileSizes, sendFileResult.totalSize, sendFileResult.existingTotalSize, sendFileResult.took.millis(), phase1ThrottlingWaitTime, - prepareEngineTime.millis(), sendSnapshotResult.totalOperations, sendSnapshotResult.tookTime.millis()); + prepareEngineStep.result().millis(), sendSnapshotResult.totalOperations, sendSnapshotResult.tookTime.millis()); try { wrappedListener.onResponse(response); } finally { @@ -484,16 +484,21 @@ public SendFileResult phase1(final IndexCommit snapshot, final Supplier } } - TimeValue prepareTargetForTranslog(final boolean fileBasedRecovery, final int totalTranslogOps) throws IOException { + void prepareTargetForTranslog(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { StopWatch stopWatch = new StopWatch().start(); - logger.trace("recovery [phase1]: prepare remote engine for translog"); + final ActionListener wrappedListener = ActionListener.wrap( + nullVal -> { + stopWatch.stop(); + final TimeValue tookTime = stopWatch.totalTime(); + logger.trace("recovery [phase1]: remote engine start took [{}]", tookTime); + listener.onResponse(tookTime); + }, + e -> listener.onFailure(new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e))); // Send a request preparing the new shard's translog to receive operations. This ensures the shard engine is started and disables // garbage collection (not the JVM's GC!) of tombstone deletes. - cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps)); - stopWatch.stop(); - final TimeValue tookTime = stopWatch.totalTime(); - logger.trace("recovery [phase1]: remote engine start took [{}]", tookTime); - return tookTime; + logger.trace("recovery [phase1]: prepare remote engine for translog"); + cancellableThreads.execute(() -> + recoveryTarget.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps, wrappedListener)); } /** diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 5950f71006a85..b300490542968 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -366,9 +366,12 @@ private void ensureRefCount() { /*** Implementation of {@link RecoveryTargetHandler } */ @Override - public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { - state().getTranslog().totalOperations(totalTranslogOps); - indexShard().openEngineAndSkipTranslogRecovery(); + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { + ActionListener.completeWith(listener, () -> { + state().getTranslog().totalOperations(totalTranslogOps); + indexShard().openEngineAndSkipTranslogRecovery(); + return null; + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java index 3372c933559c6..0fa5e94c63cd2 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -35,7 +35,7 @@ public interface RecoveryTargetHandler { * @param fileBasedRecovery whether or not this call is part of an file based recovery * @param totalTranslogOps total translog operations expected to be sent */ - void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException; + void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener); /** * The finalize request refreshes the engine now that new segments are available, enables garbage collection of tombstone files, and diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index ba703aeee2850..0799cc6595189 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -77,11 +77,12 @@ public RemoteRecoveryTargetHandler(long recoveryId, ShardId shardId, TransportSe } @Override - public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG, - new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, fileBasedRecovery), - TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), - EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, fileBasedRecovery), + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), + new ActionListenerResponseHandler<>(ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure), + in -> TransportResponse.Empty.INSTANCE, ThreadPool.Names.GENERIC)); } @Override diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index 719558edbf748..90ebc8e074108 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -19,9 +19,6 @@ package org.elasticsearch.ingest; -import java.util.Collections; -import java.util.IdentityHashMap; -import java.util.Set; import org.elasticsearch.common.Strings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -37,12 +34,15 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; +import java.util.Collections; import java.util.Date; import java.util.EnumMap; import java.util.HashMap; +import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; /** * Represents a single document being captured before indexing and holds the source and metadata (like id, type and index). diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index 5b55c00875d47..7bf26fd5e57a4 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -22,7 +22,6 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import java.lang.management.ManagementFactory; @@ -43,7 +42,7 @@ public class HotThreads { private static final Object mutex = new Object(); - private static final DateFormatter DATE_TIME_FORMATTER = DateFormatters.forPattern("dateOptionalTime"); + private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("dateOptionalTime"); private int busiestThreads = 3; private TimeValue interval = new TimeValue(500, TimeUnit.MILLISECONDS); diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index 71918cadb55d8..fb914a34bfbaa 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -247,6 +247,45 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS }); } + /** + * This unassigns a task from any node, i.e. it is assigned to a {@code null} node with the provided reason. + * + * Since the assignment executor node is null, the {@link PersistentTasksClusterService} will attempt to reassign it to a valid + * node quickly. + * + * @param taskId the id of a persistent task + * @param taskAllocationId the expected allocation id of the persistent task + * @param reason the reason for unassigning the task from any node + * @param listener the listener that will be called when task is unassigned + */ + public void unassignPersistentTask(final String taskId, + final long taskAllocationId, + final String reason, + final ActionListener> listener) { + clusterService.submitStateUpdateTask("unassign persistent task from any node", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + PersistentTasksCustomMetaData.Builder tasksInProgress = builder(currentState); + if (tasksInProgress.hasTask(taskId, taskAllocationId)) { + logger.trace("Unassigning task {} with allocation id {}", taskId, taskAllocationId); + return update(currentState, tasksInProgress.reassignTask(taskId, unassignedAssignment(reason))); + } else { + throw new ResourceNotFoundException("the task with id {} and allocation id {} doesn't exist", taskId, taskAllocationId); + } + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(newState, taskId)); + } + }); + } + /** * Creates a new {@link Assignment} for the given persistent task. * @@ -263,7 +302,7 @@ private Assignment createAssignment(final AssignmentDecision decision = decider.canAssign(); if (decision.getType() == AssignmentDecision.Type.NO) { - return new Assignment(null, "persistent task [" + taskName + "] cannot be assigned [" + decision.getReason() + "]"); + return unassignedAssignment("persistent task [" + taskName + "] cannot be assigned [" + decision.getReason() + "]"); } return persistentTasksExecutor.getAssignment(taskParams, currentState); @@ -404,6 +443,10 @@ private static ClusterState update(ClusterState currentState, PersistentTasksCus } } + private static Assignment unassignedAssignment(String reason) { + return new Assignment(null, reason); + } + /** * Class to periodically try to reassign unassigned persistent tasks. */ diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java index b7a179e41e381..9f31c6348a0a1 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java @@ -62,6 +62,7 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable> tasks; @@ -119,6 +120,11 @@ public PersistentTasksCustomMetaData(long lastAllocationId, Map PersistentTask getTa return null; } + /** + * Unassign any persistent tasks executing on nodes that are no longer in + * the cluster. If the task's assigment has a non-null executor node and that + * node is no longer in the cluster then the assignment is set to + * {@link #LOST_NODE_ASSIGNMENT} + * + * @param clusterState The clusterstate + * @return If no changes the argument {@code clusterState} is returned else + * a copy with the modified tasks + */ + public static ClusterState disassociateDeadNodes(ClusterState clusterState) { + PersistentTasksCustomMetaData tasks = getPersistentTasksCustomMetaData(clusterState); + if (tasks == null) { + return clusterState; + } + + PersistentTasksCustomMetaData.Builder taskBuilder = PersistentTasksCustomMetaData.builder(tasks); + for (PersistentTask task : tasks.tasks()) { + if (task.getAssignment().getExecutorNode() != null && + clusterState.nodes().nodeExists(task.getAssignment().getExecutorNode()) == false) { + taskBuilder.reassignTask(task.getId(), LOST_NODE_ASSIGNMENT); + } + } + + if (taskBuilder.isChanged() == false) { + return clusterState; + } + + MetaData.Builder metaDataBuilder = MetaData.builder(clusterState.metaData()); + metaDataBuilder.putCustom(TYPE, taskBuilder.build()); + return ClusterState.builder(clusterState).metaData(metaDataBuilder).build(); + } + public static class Assignment { @Nullable private final String executorNode; diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java index d211efef5173e..3efd149be19c6 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java @@ -103,7 +103,7 @@ public PluginInfo(final StreamInput in) throws IOException { } this.classname = in.readString(); if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - extendedPlugins = in.readList(StreamInput::readString); + extendedPlugins = in.readStringList(); } else { extendedPlugins = Collections.emptyList(); } @@ -128,7 +128,7 @@ public void writeTo(final StreamOutput out) throws IOException { } out.writeString(classname); if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeStringList(extendedPlugins); + out.writeStringCollection(extendedPlugins); } out.writeBoolean(hasNativeController); if (out.getVersion().onOrAfter(Version.V_6_0_0_beta2) && out.getVersion().before(Version.V_6_3_0)) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java index 54cc9fd988f13..a7065622a86e5 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java @@ -19,12 +19,14 @@ package org.elasticsearch.rest.action.admin.indices; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; @@ -43,6 +45,12 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestGetFieldMappingAction extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestPutMappingAction.class)); + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Using include_type_name in get " + + "field mapping requests is deprecated. The parameter will be removed in the next major version."; + public RestGetFieldMappingAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(GET, "/_mapping/field/{fields}", this); @@ -68,6 +76,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC throw new IllegalArgumentException("Types cannot be specified unless include_type_name" + " is set to true."); } + if (request.hasParam(INCLUDE_TYPE_NAME_PARAMETER)) { + deprecationLogger.deprecatedAndMaybeLog("get_field_mapping_with_types", TYPES_DEPRECATION_MESSAGE); + } GetFieldMappingsRequest getMappingsRequest = new GetFieldMappingsRequest(); getMappingsRequest.indices(indices).types(types).fields(fields).includeDefaults(request.paramAsBoolean("include_defaults", false)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index 676f2bbdc7b2e..bb449d584b2c8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -39,7 +39,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.RestController; @@ -61,6 +61,7 @@ public class RestIndicesAction extends AbstractCatAction { + private static final DateFormatter STRICT_DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_time"); private final IndexNameExpressionResolver indexNameExpressionResolver; public RestIndicesAction(Settings settings, RestController controller, IndexNameExpressionResolver indexNameExpressionResolver) { @@ -432,7 +433,7 @@ Table buildTable(RestRequest request, Index[] indices, ClusterHealthResponse res table.addCell(indexMetaData.getCreationDate()); ZonedDateTime creationTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(indexMetaData.getCreationDate()), ZoneOffset.UTC); - table.addCell(DateFormatters.forPattern("strict_date_time").format(creationTime)); + table.addCell(STRICT_DATE_TIME_FORMATTER.format(creationTime)); table.addCell(totalStats.getStore() == null ? null : totalStats.getStore().size()); table.addCell(primaryStats.getStore() == null ? null : primaryStats.getStore().size()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java index fb302b1b3b3a4..22258ce2d8878 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -99,7 +98,7 @@ protected Table getTableWithHeader(RestRequest request) { .endHeaders(); } - private static final DateFormatter FORMATTER = DateFormatters.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); + private static final DateFormatter FORMATTER = DateFormatter.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); private Table buildTable(RestRequest req, GetSnapshotsResponse getSnapshotsResponse) { Table table = getTableWithHeader(req); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java index 39b3f08dcdc5f..573eac6c04941 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -125,7 +124,7 @@ protected Table getTableWithHeader(final RestRequest request) { return table; } - private static final DateFormatter FORMATTER = DateFormatters.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); + private static final DateFormatter FORMATTER = DateFormatter.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); private void buildRow(Table table, boolean fullId, boolean detailed, DiscoveryNodes discoveryNodes, TaskInfo taskInfo) { table.startRow(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 3e3a1e02a174b..da773efed580d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -201,6 +201,9 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil if (request.hasParam("version")) { searchSourceBuilder.version(request.paramAsBoolean("version", null)); } + if (request.hasParam("seq_no_primary_term")) { + searchSourceBuilder.seqNoAndPrimaryTerm(request.paramAsBoolean("seq_no_primary_term", null)); + } if (request.hasParam("timeout")) { searchSourceBuilder.timeout(request.paramAsTime("timeout", null)); } diff --git a/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java b/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java index 546deb3a24b68..fc3816cad8a15 100644 --- a/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java +++ b/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateUtils; import org.joda.time.DateTime; @@ -50,7 +49,7 @@ * A wrapper around ZonedDateTime that exposes joda methods for backcompat. */ public class JodaCompatibleZonedDateTime { - private static final DateFormatter DATE_FORMATTER = DateFormatters.forPattern("strict_date_time"); + private static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("strict_date_time"); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(JodaCompatibleZonedDateTime.class)); diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java index a911a4f197d67..f1358bc3c6ba1 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java @@ -41,7 +41,7 @@ public final class ScoreScriptUtils { /****** STATIC FUNCTIONS that can be used by users for score calculations **/ - public static double rational(double value, double k) { + public static double saturation(double value, double k) { return value/ (k + value); } @@ -212,7 +212,7 @@ public static final class DecayDateLinear { double scaling; public DecayDateLinear(String originStr, String scaleStr, String offsetStr, double decay) { - this.origin = dateParser.parse(originStr, null, false, defaultZoneId); + this.origin = dateParser.parse(originStr, null, false, defaultZoneId).toEpochMilli(); long scale = TimeValue.parseTimeValue(scaleStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".scale") .getMillis(); this.offset = TimeValue.parseTimeValue(offsetStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".offset") @@ -235,7 +235,7 @@ public static final class DecayDateExp { double scaling; public DecayDateExp(String originStr, String scaleStr, String offsetStr, double decay) { - this.origin = dateParser.parse(originStr, null, false, defaultZoneId); + this.origin = dateParser.parse(originStr, null, false, defaultZoneId).toEpochMilli(); long scale = TimeValue.parseTimeValue(scaleStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".scale") .getMillis(); this.offset = TimeValue.parseTimeValue(offsetStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".offset") @@ -258,7 +258,7 @@ public static final class DecayDateGauss { double scaling; public DecayDateGauss(String originStr, String scaleStr, String offsetStr, double decay) { - this.origin = dateParser.parse(originStr, null, false, defaultZoneId); + this.origin = dateParser.parse(originStr, null, false, defaultZoneId).toEpochMilli(); long scale = TimeValue.parseTimeValue(scaleStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".scale") .getMillis(); this.offset = TimeValue.parseTimeValue(offsetStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".offset") diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 590c58b1f6615..4b82e23e42061 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -107,6 +107,7 @@ final class DefaultSearchContext extends SearchContext { private ScrollContext scrollContext; private boolean explain; private boolean version = false; // by default, we don't return versions + private boolean seqAndPrimaryTerm = false; private StoredFieldsContext storedFields; private ScriptFieldsContext scriptFields; private FetchSourceContext fetchSourceContext; @@ -719,6 +720,16 @@ public void version(boolean version) { this.version = version; } + @Override + public boolean seqNoAndPrimaryTerm() { + return seqAndPrimaryTerm; + } + + @Override + public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + this.seqAndPrimaryTerm = seqNoAndPrimaryTerm; + } + @Override public int[] docIdsToLoad() { return docIdsToLoad; diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 900e1d7fd09ca..ceefe035d4613 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -21,6 +21,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,7 +31,6 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.time.DateUtils; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.net.InetAddress; @@ -38,6 +38,7 @@ import java.text.DecimalFormatSymbols; import java.text.NumberFormat; import java.text.ParseException; +import java.time.Instant; import java.time.ZoneId; import java.util.Arrays; import java.util.Base64; @@ -164,20 +165,24 @@ final class DateTime implements DocValueFormat { public static final String NAME = "date_time"; final DateFormatter formatter; - // TODO: change this to ZoneId, but will require careful change to serialization - final DateTimeZone timeZone; - private final ZoneId zoneId; + final ZoneId timeZone; private final DateMathParser parser; - public DateTime(DateFormatter formatter, DateTimeZone timeZone) { - this.formatter = Objects.requireNonNull(formatter); + public DateTime(DateFormatter formatter, ZoneId timeZone) { + this.formatter = formatter; this.timeZone = Objects.requireNonNull(timeZone); - this.zoneId = DateUtils.dateTimeZoneToZoneId(timeZone); this.parser = formatter.toDateMathParser(); } public DateTime(StreamInput in) throws IOException { - this(DateFormatter.forPattern(in.readString()), DateTimeZone.forID(in.readString())); + this.formatter = DateFormatter.forPattern(in.readString()); + this.parser = formatter.toDateMathParser(); + String zoneId = in.readString(); + if (in.getVersion().before(Version.V_7_0_0)) { + this.timeZone = DateUtils.of(zoneId); + } else { + this.timeZone = ZoneId.of(zoneId); + } } @Override @@ -188,12 +193,16 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(formatter.pattern()); - out.writeString(timeZone.getID()); + if (out.getVersion().before(Version.V_7_0_0)) { + out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); + } else { + out.writeString(timeZone.getId()); + } } @Override public String format(long value) { - return formatter.withZone(zoneId).formatMillis(value); + return formatter.format(Instant.ofEpochMilli(value).atZone(timeZone)); } @Override @@ -203,7 +212,7 @@ public String format(double value) { @Override public long parseLong(String value, boolean roundUp, LongSupplier now) { - return parser.parse(value, now, roundUp, DateUtils.dateTimeZoneToZoneId(timeZone)); + return parser.parse(value, now, roundUp, timeZone).toEpochMilli(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 7fd68852ce284..42f96e52fb119 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -46,6 +47,7 @@ import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.lookup.SourceLookup; @@ -91,6 +93,8 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable, parser.declareField((map, value) -> map.put(Fields._SCORE, value), SearchHit::parseScore, new ParseField(Fields._SCORE), ValueType.FLOAT_OR_NULL); parser.declareLong((map, value) -> map.put(Fields._VERSION, value), new ParseField(Fields._VERSION)); + parser.declareLong((map, value) -> map.put(Fields._SEQ_NO, value), new ParseField(Fields._SEQ_NO)); + parser.declareLong((map, value) -> map.put(Fields._PRIMARY_TERM, value), new ParseField(Fields._PRIMARY_TERM)); parser.declareField((map, value) -> map.put(Fields._SHARD, value), (p, c) -> ShardId.fromString(p.text()), new ParseField(Fields._SHARD), ValueType.STRING); parser.declareObject((map, value) -> map.put(SourceFieldMapper.NAME, value), (p, c) -> parseSourceBytes(p), @@ -587,6 +626,8 @@ public static SearchHit createFromMap(Map values) { } searchHit.score(get(Fields._SCORE, values, DEFAULT_SCORE)); searchHit.version(get(Fields._VERSION, values, -1L)); + searchHit.setSeqNo(get(Fields._SEQ_NO, values, SequenceNumbers.UNASSIGNED_SEQ_NO)); + searchHit.setPrimaryTerm(get(Fields._PRIMARY_TERM, values, SequenceNumbers.UNASSIGNED_PRIMARY_TERM)); searchHit.sortValues(get(Fields.SORT, values, SearchSortValues.EMPTY)); searchHit.highlightFields(get(Fields.HIGHLIGHT, values, null)); searchHit.sourceRef(get(SourceFieldMapper.NAME, values, null)); @@ -743,6 +784,10 @@ public void readFrom(StreamInput in) throws IOException { type = in.readOptionalText(); nestedIdentity = in.readOptionalWriteable(NestedIdentity::new); version = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + seqNo = in.readZLong(); + primaryTerm = in.readVLong(); + } source = in.readBytesReference(); if (source.length() == 0) { source = null; @@ -811,6 +856,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalText(type); out.writeOptionalWriteable(nestedIdentity); out.writeLong(version); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeZLong(seqNo); + out.writeVLong(primaryTerm); + } out.writeBytesReference(source); if (explanation == null) { out.writeBoolean(false); @@ -866,6 +915,8 @@ public boolean equals(Object obj) { && Objects.equals(type, other.type) && Objects.equals(nestedIdentity, other.nestedIdentity) && Objects.equals(version, other.version) + && Objects.equals(seqNo, other.seqNo) + && Objects.equals(primaryTerm, other.primaryTerm) && Objects.equals(source, other.source) && Objects.equals(fields, other.fields) && Objects.equals(getHighlightFields(), other.getHighlightFields()) @@ -879,8 +930,8 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return Objects.hash(id, type, nestedIdentity, version, source, fields, getHighlightFields(), Arrays.hashCode(matchedQueries), - explanation, shard, innerHits, index, clusterAlias); + return Objects.hash(id, type, nestedIdentity, version, seqNo, primaryTerm, source, fields, getHighlightFields(), + Arrays.hashCode(matchedQueries), explanation, shard, innerHits, index, clusterAlias); } /** diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 2531685b94557..e75271c2885ae 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -240,6 +240,7 @@ import org.elasticsearch.search.fetch.subphase.MatchedQueriesFetchSubPhase; import org.elasticsearch.search.fetch.subphase.ScoreFetchSubPhase; import org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase; +import org.elasticsearch.search.fetch.subphase.SeqNoPrimaryTermFetchSubPhase; import org.elasticsearch.search.fetch.subphase.VersionFetchSubPhase; import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter; import org.elasticsearch.search.fetch.subphase.highlight.HighlightPhase; @@ -727,6 +728,7 @@ private void registerFetchSubPhases(List plugins) { registerFetchSubPhase(new ScriptFieldsFetchSubPhase()); registerFetchSubPhase(new FetchSourceSubPhase()); registerFetchSubPhase(new VersionFetchSubPhase()); + registerFetchSubPhase(new SeqNoPrimaryTermFetchSubPhase()); registerFetchSubPhase(new MatchedQueriesFetchSubPhase()); registerFetchSubPhase(new HighlightPhase(highlighters)); registerFetchSubPhase(new ScoreFetchSubPhase()); diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 4fee684276288..5e2758eb5b83c 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchTask; @@ -329,7 +328,7 @@ private DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchTask t } catch (Exception e) { logger.trace("Dfs phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } @@ -380,29 +379,24 @@ protected void doRun() { }); } - private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchTask task) throws IOException { + private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchTask task) throws Exception { final SearchContext context = createAndPutContext(request); - final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); context.incRef(); - boolean queryPhaseSuccess = false; try { context.setTask(task); - operationListener.onPreQueryPhase(context); - long time = System.nanoTime(); - contextProcessing(context); - - loadOrExecuteQueryPhase(request, context); - - if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) { - freeContext(context.id()); - } else { - contextProcessedSuccessfully(context); + final long afterQueryTime; + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)) { + contextProcessing(context); + loadOrExecuteQueryPhase(request, context); + if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) { + freeContext(context.id()); + } else { + contextProcessedSuccessfully(context); + } + afterQueryTime = executor.success(); } - final long afterQueryTime = System.nanoTime(); - queryPhaseSuccess = true; - operationListener.onQueryPhase(context, afterQueryTime - time); if (request.numberOfShards() == 1) { - return executeFetchPhase(context, operationListener, afterQueryTime); + return executeFetchPhase(context, afterQueryTime); } return context.queryResult(); } catch (Exception e) { @@ -411,21 +405,16 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchTa e = (e.getCause() == null || e.getCause() instanceof Exception) ? (Exception) e.getCause() : new ElasticsearchException(e.getCause()); } - if (!queryPhaseSuccess) { - operationListener.onFailedQueryPhase(context); - } logger.trace("Query phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } } - private QueryFetchSearchResult executeFetchPhase(SearchContext context, SearchOperationListener operationListener, - long afterQueryTime) { - operationListener.onPreFetchPhase(context); - try { + private QueryFetchSearchResult executeFetchPhase(SearchContext context, long afterQueryTime) { + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, afterQueryTime)){ shortcutDocIdsToLoad(context); fetchPhase.execute(context); if (fetchPhaseShouldFreeContext(context)) { @@ -433,34 +422,27 @@ private QueryFetchSearchResult executeFetchPhase(SearchContext context, SearchOp } else { contextProcessedSuccessfully(context); } - } catch (Exception e) { - operationListener.onFailedFetchPhase(context); - throw ExceptionsHelper.convertToRuntime(e); + executor.success(); } - operationListener.onFetchPhase(context, System.nanoTime() - afterQueryTime); return new QueryFetchSearchResult(context.queryResult(), context.fetchResult()); } public void executeQueryPhase(InternalScrollSearchRequest request, SearchTask task, ActionListener listener) { runAsync(request.id(), () -> { final SearchContext context = findContext(request.id(), request); - SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); context.incRef(); - try { + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)) { context.setTask(task); - operationListener.onPreQueryPhase(context); - long time = System.nanoTime(); contextProcessing(context); processScroll(request, context); queryPhase.execute(context); contextProcessedSuccessfully(context); - operationListener.onQueryPhase(context, System.nanoTime() - time); + executor.success(); return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget()); } catch (Exception e) { - operationListener.onFailedQueryPhase(context); logger.trace("Query phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } @@ -471,15 +453,10 @@ public void executeQueryPhase(QuerySearchRequest request, SearchTask task, Actio runAsync(request.id(), () -> { final SearchContext context = findContext(request.id(), request); context.setTask(task); - IndexShard indexShard = context.indexShard(); - SearchOperationListener operationListener = indexShard.getSearchOperationListener(); context.incRef(); - try { + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)) { contextProcessing(context); context.searcher().setAggregatedDfs(request.dfs()); - - operationListener.onPreQueryPhase(context); - long time = System.nanoTime(); queryPhase.execute(context); if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) { // no hits, we can release the context since there will be no fetch phase @@ -487,13 +464,12 @@ public void executeQueryPhase(QuerySearchRequest request, SearchTask task, Actio } else { contextProcessedSuccessfully(context); } - operationListener.onQueryPhase(context, System.nanoTime() - time); + executor.success(); return context.queryResult(); } catch (Exception e) { - operationListener.onFailedQueryPhase(context); logger.trace("Query phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } @@ -527,28 +503,19 @@ public void executeFetchPhase(InternalScrollSearchRequest request, SearchTask ta ActionListener listener) { runAsync(request.id(), () -> { final SearchContext context = findContext(request.id(), request); + context.setTask(task); context.incRef(); - try { - context.setTask(task); + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)){ contextProcessing(context); - SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); processScroll(request, context); - operationListener.onPreQueryPhase(context); - final long time = System.nanoTime(); - try { - queryPhase.execute(context); - } catch (Exception e) { - operationListener.onFailedQueryPhase(context); - throw ExceptionsHelper.convertToRuntime(e); - } - long afterQueryTime = System.nanoTime(); - operationListener.onQueryPhase(context, afterQueryTime - time); - QueryFetchSearchResult fetchSearchResult = executeFetchPhase(context, operationListener, afterQueryTime); + queryPhase.execute(context); + final long afterQueryTime = executor.success(); + QueryFetchSearchResult fetchSearchResult = executeFetchPhase(context, afterQueryTime); return new ScrollQueryFetchSearchResult(fetchSearchResult, context.shardTarget()); } catch (Exception e) { logger.trace("Fetch phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } @@ -558,7 +525,6 @@ public void executeFetchPhase(InternalScrollSearchRequest request, SearchTask ta public void executeFetchPhase(ShardFetchRequest request, SearchTask task, ActionListener listener) { runAsync(request.id(), () -> { final SearchContext context = findContext(request.id(), request); - final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); context.incRef(); try { context.setTask(task); @@ -567,21 +533,20 @@ public void executeFetchPhase(ShardFetchRequest request, SearchTask task, Action context.scrollContext().lastEmittedDoc = request.lastEmittedDoc(); } context.docIdsToLoad(request.docIds(), 0, request.docIdsSize()); - operationListener.onPreFetchPhase(context); - long time = System.nanoTime(); - fetchPhase.execute(context); - if (fetchPhaseShouldFreeContext(context)) { - freeContext(request.id()); - } else { - contextProcessedSuccessfully(context); + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, System.nanoTime())) { + fetchPhase.execute(context); + if (fetchPhaseShouldFreeContext(context)) { + freeContext(request.id()); + } else { + contextProcessedSuccessfully(context); + } + executor.success(); } - operationListener.onFetchPhase(context, System.nanoTime() - time); return context.fetchResult(); } catch (Exception e) { - operationListener.onFailedFetchPhase(context); logger.trace("Fetch phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } @@ -661,7 +626,7 @@ final SearchContext createContext(ShardSearchRequest request) throws IOException context.lowLevelCancellation(lowLevelCancellation); } catch (Exception e) { context.close(); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } return context; @@ -733,7 +698,7 @@ public void freeAllScrollContexts() { } } - private void contextScrollKeepAlive(SearchContext context, long keepAlive) throws IOException { + private void contextScrollKeepAlive(SearchContext context, long keepAlive) { if (keepAlive > maxKeepAlive) { throw new IllegalArgumentException( "Keep alive for scroll (" + TimeValue.timeValueMillis(keepAlive) + ") is too large. " + @@ -901,6 +866,11 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc if (source.version() != null) { context.version(source.version()); } + + if (source.seqNoAndPrimaryTerm() != null) { + context.seqNoAndPrimaryTerm(source.seqNoAndPrimaryTerm()); + } + if (source.stats() != null) { context.groupStats(source.stats()); } @@ -986,7 +956,7 @@ private void shortcutDocIdsToLoad(SearchContext context) { context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); } - private void processScroll(InternalScrollSearchRequest request, SearchContext context) throws IOException { + private void processScroll(InternalScrollSearchRequest request, SearchContext context) { // process scroll context.from(context.from() + context.size()); context.scrollContext().scroll = request.scroll(); @@ -1142,4 +1112,58 @@ public boolean canMatch() { return canMatch; } } + + /** + * This helper class ensures we only execute either the success or the failure path for {@link SearchOperationListener}. + * This is crucial for some implementations like {@link org.elasticsearch.index.search.stats.ShardSearchStats}. + */ + private static final class SearchOperationListenerExecutor implements AutoCloseable { + private final SearchOperationListener listener; + private final SearchContext context; + private final long time; + private final boolean fetch; + private long afterQueryTime = -1; + private boolean closed = false; + + SearchOperationListenerExecutor(SearchContext context) { + this(context, false, System.nanoTime()); + } + + SearchOperationListenerExecutor(SearchContext context, boolean fetch, long startTime) { + this.listener = context.indexShard().getSearchOperationListener(); + this.context = context; + time = startTime; + this.fetch = fetch; + if (fetch) { + listener.onPreFetchPhase(context); + } else { + listener.onPreQueryPhase(context); + } + } + + long success() { + return afterQueryTime = System.nanoTime(); + } + + @Override + public void close() { + assert closed == false : "already closed - while technically ok double closing is a likely a bug in this case"; + if (closed == false) { + closed = true; + if (afterQueryTime != -1) { + if (fetch) { + listener.onFetchPhase(context, afterQueryTime - time); + } else { + listener.onQueryPhase(context, afterQueryTime - time); + } + } else { + if (fetch) { + listener.onFailedFetchPhase(context); + } else { + listener.onFailedQueryPhase(context); + } + } + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 28970ec828af9..53a7832884c76 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -19,11 +19,12 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -37,9 +38,10 @@ import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.Objects; import static org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder.DATE_FIELD_UNITS; @@ -70,9 +72,9 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild }, Histogram.INTERVAL_FIELD, ObjectParser.ValueType.LONG); PARSER.declareField(DateHistogramValuesSourceBuilder::timeZone, p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - return DateTimeZone.forID(p.text()); + return ZoneId.of(p.text()); } else { - return DateTimeZone.forOffsetHours(p.intValue()); + return ZoneOffset.ofHours(p.intValue()); } }, new ParseField("time_zone"), ObjectParser.ValueType.LONG); CompositeValuesSourceParserHelper.declareValuesSourceFields(PARSER, ValueType.NUMERIC); @@ -82,7 +84,7 @@ static DateHistogramValuesSourceBuilder parse(String name, XContentParser parser } private long interval = 0; - private DateTimeZone timeZone = null; + private ZoneId timeZone = null; private DateHistogramInterval dateHistogramInterval; public DateHistogramValuesSourceBuilder(String name) { @@ -93,8 +95,10 @@ protected DateHistogramValuesSourceBuilder(StreamInput in) throws IOException { super(in); this.interval = in.readLong(); this.dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); - if (in.readBoolean()) { - timeZone = DateTimeZone.forID(in.readString()); + if (in.getVersion().before(Version.V_7_0_0)) { + this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); + } else { + this.timeZone = in.readOptionalZoneId(); } } @@ -102,10 +106,10 @@ protected DateHistogramValuesSourceBuilder(StreamInput in) throws IOException { protected void innerWriteTo(StreamOutput out) throws IOException { out.writeLong(interval); out.writeOptionalWriteable(dateHistogramInterval); - boolean hasTimeZone = timeZone != null; - out.writeBoolean(hasTimeZone); - if (hasTimeZone) { - out.writeString(timeZone.getID()); + if (out.getVersion().before(Version.V_7_0_0)) { + out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); + } else { + out.writeOptionalZoneId(timeZone); } } @@ -176,7 +180,7 @@ public DateHistogramValuesSourceBuilder dateHistogramInterval(DateHistogramInter /** * Sets the time zone to use for this aggregation */ - public DateHistogramValuesSourceBuilder timeZone(DateTimeZone timeZone) { + public DateHistogramValuesSourceBuilder timeZone(ZoneId timeZone) { if (timeZone == null) { throw new IllegalArgumentException("[timeZone] must not be null: [" + name + "]"); } @@ -187,14 +191,14 @@ public DateHistogramValuesSourceBuilder timeZone(DateTimeZone timeZone) { /** * Gets the time zone to use for this aggregation */ - public DateTimeZone timeZone() { + public ZoneId timeZone() { return timeZone; } private Rounding createRounding() { Rounding.Builder tzRoundingBuilder; if (dateHistogramInterval != null) { - DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); + Rounding.DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); if (dateTimeUnit != null) { tzRoundingBuilder = Rounding.builder(dateTimeUnit); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 26fcfed9a262f..c5a1ab50b8230 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -69,7 +69,7 @@ public class InternalComposite public InternalComposite(StreamInput in) throws IOException { super(in); this.size = in.readVInt(); - this.sourceNames = in.readList(StreamInput::readString); + this.sourceNames = in.readStringList(); this.formats = new ArrayList<>(sourceNames.size()); for (int i = 0; i < sourceNames.size(); i++) { if (in.getVersion().onOrAfter(Version.V_6_3_0)) { @@ -90,7 +90,7 @@ public InternalComposite(StreamInput in) throws IOException { @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(size); - out.writeStringList(sourceNames); + out.writeStringCollection(sourceNames); if (out.getVersion().onOrAfter(Version.V_6_3_0)) { for (DocValueFormat format : formats) { out.writeNamedWriteable(format); @@ -226,7 +226,7 @@ InternalBucket next() { } } - static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket + public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements CompositeAggregation.Bucket, KeyComparable { private final CompositeKey key; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java index 635690c44f49e..9ee142fcd2fd5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; -import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.Rounding; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.search.aggregations.support.ValuesSource; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java index 5ee89ee05eef0..f5127c31bce65 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java @@ -50,4 +50,4 @@ public String getWriteableName() { protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { return new InternalFilter(name, docCount, subAggregations, pipelineAggregators(), getMetaData()); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java index 8246d629bd527..850bc6e7bafcb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java @@ -34,7 +34,7 @@ import java.util.List; import java.util.Objects; -class GeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoHashGrid.Bucket, Comparable { +public class GeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoHashGrid.Bucket, Comparable { protected long geohashAsLong; protected long docCount; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 87ba80af9a4b0..794ce066ed76e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -20,11 +20,10 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -42,9 +41,9 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.internal.SearchContext; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Arrays; import java.util.Map; import java.util.Objects; @@ -70,19 +69,19 @@ public class AutoDateHistogramAggregationBuilder * The current implementation probably should not be invoked in a tight loop. * @return Array of RoundingInfo */ - static RoundingInfo[] buildRoundings(DateTimeZone timeZone) { + static RoundingInfo[] buildRoundings(ZoneId timeZone) { RoundingInfo[] roundings = new RoundingInfo[6]; - roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE, timeZone), - 1000L, "s" , 1, 5, 10, 30); - roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR, timeZone), + roundings[0] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.SECOND_OF_MINUTE, timeZone), + 1000L, "s", 1, 5, 10, 30); + roundings[1] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.MINUTES_OF_HOUR, timeZone), 60 * 1000L, "m", 1, 5, 10, 30); - roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY, timeZone), - 60 * 60 * 1000L, "h", 1, 3, 12); - roundings[3] = new RoundingInfo(createRounding(DateTimeUnit.DAY_OF_MONTH, timeZone), + roundings[2] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.HOUR_OF_DAY, timeZone), + 60 * 60 * 1000L, "h",1, 3, 12); + roundings[3] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.DAY_OF_MONTH, timeZone), 24 * 60 * 60 * 1000L, "d", 1, 7); - roundings[4] = new RoundingInfo(createRounding(DateTimeUnit.MONTH_OF_YEAR, timeZone), + roundings[4] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.MONTH_OF_YEAR, timeZone), 30 * 24 * 60 * 60 * 1000L, "M", 1, 3); - roundings[5] = new RoundingInfo(createRounding(DateTimeUnit.YEAR_OF_CENTURY, timeZone), + roundings[5] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.YEAR_OF_CENTURY, timeZone), 365 * 24 * 60 * 60 * 1000L, "y", 1, 5, 10, 20, 50, 100); return roundings; } @@ -156,7 +155,7 @@ public int getNumBuckets() { return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, subFactoriesBuilder, metaData); } - static Rounding createRounding(DateTimeUnit interval, DateTimeZone timeZone) { + static Rounding createRounding(Rounding.DateTimeUnit interval, ZoneId timeZone) { Rounding.Builder tzRoundingBuilder = Rounding.builder(interval); if (timeZone != null) { tzRoundingBuilder.timeZone(timeZone); @@ -196,7 +195,7 @@ public RoundingInfo(Rounding rounding, long roughEstimateDurationMillis, String } public RoundingInfo(StreamInput in) throws IOException { - rounding = Rounding.Streams.read(in); + rounding = Rounding.read(in); roughEstimateDurationMillis = in.readVLong(); innerIntervals = in.readIntArray(); unitAbbreviation = in.readString(); @@ -204,7 +203,7 @@ public RoundingInfo(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - Rounding.Streams.write(rounding, out); + rounding.writeTo(out); out.writeVLong(roughEstimateDurationMillis); out.writeIntArray(innerIntervals); out.writeString(unitAbbreviation); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index 81bb70bd9672a..1b982ea9deca2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -23,8 +23,8 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 5199313e0aca1..6d7852a864453 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -23,10 +23,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.unit.TimeValue; @@ -54,10 +53,12 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.internal.SearchContext; -import org.joda.time.DateTimeField; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.zone.ZoneOffsetTransition; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -70,29 +71,30 @@ */ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuilder implements MultiBucketAggregationBuilder { + public static final String NAME = "date_histogram"; private static DateMathParser EPOCH_MILLIS_PARSER = DateFormatter.forPattern("epoch_millis").toDateMathParser(); - public static final Map DATE_FIELD_UNITS; + public static final Map DATE_FIELD_UNITS; static { - Map dateFieldUnits = new HashMap<>(); - dateFieldUnits.put("year", DateTimeUnit.YEAR_OF_CENTURY); - dateFieldUnits.put("1y", DateTimeUnit.YEAR_OF_CENTURY); - dateFieldUnits.put("quarter", DateTimeUnit.QUARTER); - dateFieldUnits.put("1q", DateTimeUnit.QUARTER); - dateFieldUnits.put("month", DateTimeUnit.MONTH_OF_YEAR); - dateFieldUnits.put("1M", DateTimeUnit.MONTH_OF_YEAR); - dateFieldUnits.put("week", DateTimeUnit.WEEK_OF_WEEKYEAR); - dateFieldUnits.put("1w", DateTimeUnit.WEEK_OF_WEEKYEAR); - dateFieldUnits.put("day", DateTimeUnit.DAY_OF_MONTH); - dateFieldUnits.put("1d", DateTimeUnit.DAY_OF_MONTH); - dateFieldUnits.put("hour", DateTimeUnit.HOUR_OF_DAY); - dateFieldUnits.put("1h", DateTimeUnit.HOUR_OF_DAY); - dateFieldUnits.put("minute", DateTimeUnit.MINUTES_OF_HOUR); - dateFieldUnits.put("1m", DateTimeUnit.MINUTES_OF_HOUR); - dateFieldUnits.put("second", DateTimeUnit.SECOND_OF_MINUTE); - dateFieldUnits.put("1s", DateTimeUnit.SECOND_OF_MINUTE); + Map dateFieldUnits = new HashMap<>(); + dateFieldUnits.put("year", Rounding.DateTimeUnit.YEAR_OF_CENTURY); + dateFieldUnits.put("1y", Rounding.DateTimeUnit.YEAR_OF_CENTURY); + dateFieldUnits.put("quarter", Rounding.DateTimeUnit.QUARTER_OF_YEAR); + dateFieldUnits.put("1q", Rounding.DateTimeUnit.QUARTER_OF_YEAR); + dateFieldUnits.put("month", Rounding.DateTimeUnit.MONTH_OF_YEAR); + dateFieldUnits.put("1M", Rounding.DateTimeUnit.MONTH_OF_YEAR); + dateFieldUnits.put("week", Rounding.DateTimeUnit.WEEK_OF_WEEKYEAR); + dateFieldUnits.put("1w", Rounding.DateTimeUnit.WEEK_OF_WEEKYEAR); + dateFieldUnits.put("day", Rounding.DateTimeUnit.DAY_OF_MONTH); + dateFieldUnits.put("1d", Rounding.DateTimeUnit.DAY_OF_MONTH); + dateFieldUnits.put("hour", Rounding.DateTimeUnit.HOUR_OF_DAY); + dateFieldUnits.put("1h", Rounding.DateTimeUnit.HOUR_OF_DAY); + dateFieldUnits.put("minute", Rounding.DateTimeUnit.MINUTES_OF_HOUR); + dateFieldUnits.put("1m", Rounding.DateTimeUnit.MINUTES_OF_HOUR); + dateFieldUnits.put("second", Rounding.DateTimeUnit.SECOND_OF_MINUTE); + dateFieldUnits.put("1s", Rounding.DateTimeUnit.SECOND_OF_MINUTE); DATE_FIELD_UNITS = unmodifiableMap(dateFieldUnits); } @@ -369,11 +371,11 @@ public String getType() { * coordinating node in order to generate missing buckets, which may cross a transition * even though data on the shards doesn't. */ - DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { - final DateTimeZone tz = timeZone(); + ZoneId rewriteTimeZone(QueryShardContext context) throws IOException { + final ZoneId tz = timeZone(); if (field() != null && tz != null && - tz.isFixed() == false && + tz.getRules().isFixedOffset() == false && field() != null && script() == null) { final MappedFieldType ft = context.fieldMapper(field()); @@ -391,16 +393,29 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { } if (anyInstant != null) { - final long prevTransition = tz.previousTransition(anyInstant); - final long nextTransition = tz.nextTransition(anyInstant); + Instant instant = Instant.ofEpochMilli(anyInstant); + ZoneOffsetTransition prevOffsetTransition = tz.getRules().previousTransition(instant); + final long prevTransition; + if (prevOffsetTransition != null) { + prevTransition = prevOffsetTransition.getInstant().toEpochMilli(); + } else { + prevTransition = instant.toEpochMilli(); + } + ZoneOffsetTransition nextOffsetTransition = tz.getRules().nextTransition(instant); + final long nextTransition; + if (nextOffsetTransition != null) { + nextTransition = nextOffsetTransition.getInstant().toEpochMilli(); + } else { + nextTransition = instant.toEpochMilli(); + } // We need all not only values but also rounded values to be within // [prevTransition, nextTransition]. final long low; - DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); + Rounding.DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); if (intervalAsUnit != null) { - final DateTimeField dateTimeField = intervalAsUnit.field(tz); - low = dateTimeField.roundCeiling(prevTransition); + Rounding rounding = Rounding.builder(intervalAsUnit).timeZone(timeZone()).build(); + low = rounding.nextRoundingValue(prevTransition); } else { final TimeValue intervalAsMillis = getIntervalAsTimeValue(); low = Math.addExact(prevTransition, intervalAsMillis.millis()); @@ -408,12 +423,12 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { // rounding rounds down, so 'nextTransition' is a good upper bound final long high = nextTransition; - if (ft.isFieldWithinQuery(reader, low, high, true, false, DateTimeZone.UTC, EPOCH_MILLIS_PARSER, + if (ft.isFieldWithinQuery(reader, low, high, true, false, ZoneOffset.UTC, EPOCH_MILLIS_PARSER, context) == Relation.WITHIN) { // All values in this reader have the same offset despite daylight saving times. // This is very common for location-based timezones such as Europe/Paris in // combination with time-based indices. - return DateTimeZone.forOffsetMillis(tz.getOffset(anyInstant)); + return ZoneOffset.ofTotalSeconds(tz.getRules().getOffset(instant).getTotalSeconds()); } } } @@ -424,9 +439,9 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { @Override protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - final DateTimeZone tz = timeZone(); + final ZoneId tz = timeZone(); final Rounding rounding = createRounding(tz); - final DateTimeZone rewrittenTimeZone = rewriteTimeZone(context.getQueryShardContext()); + final ZoneId rewrittenTimeZone = rewriteTimeZone(context.getQueryShardContext()); final Rounding shardRounding; if (tz == rewrittenTimeZone) { shardRounding = rounding; @@ -447,7 +462,7 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { * {@code null} then it means that the interval is expressed as a fixed * {@link TimeValue} and may be accessed via * {@link #getIntervalAsTimeValue()}. */ - private DateTimeUnit getIntervalAsDateTimeUnit() { + private Rounding.DateTimeUnit getIntervalAsDateTimeUnit() { if (dateHistogramInterval != null) { return DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); } @@ -466,9 +481,9 @@ private TimeValue getIntervalAsTimeValue() { } } - private Rounding createRounding(DateTimeZone timeZone) { + private Rounding createRounding(ZoneId timeZone) { Rounding.Builder tzRoundingBuilder; - DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); + Rounding.DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); if (intervalAsUnit != null) { tzRoundingBuilder = Rounding.builder(intervalAsUnit); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 735a6717210a5..0c7a91505ae88 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -23,8 +23,8 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index c7ad6de7e0d72..8c025eb34eeb3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; -import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.Rounding; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java index 4cecfeff83381..b0dfbb9d66e9d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java @@ -21,10 +21,10 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentFragment; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java index f2e450942c3ad..63d08f5e832ac 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -19,9 +19,9 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregations; @@ -32,10 +32,10 @@ import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -108,7 +108,7 @@ public String getKeyAsString() { @Override public Object getKey() { - return new DateTime(key, DateTimeZone.UTC); + return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 496f8efc60ccf..2fa7f15a703ec 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -20,9 +20,9 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregations; @@ -34,10 +34,10 @@ import org.elasticsearch.search.aggregations.KeyComparable; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -112,7 +112,7 @@ public String getKeyAsString() { @Override public Object getKey() { - return new DateTime(key, DateTimeZone.UTC); + return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } @Override @@ -185,13 +185,13 @@ static class EmptyBucketInfo { } EmptyBucketInfo(StreamInput in) throws IOException { - rounding = Rounding.Streams.read(in); + rounding = Rounding.read(in); subAggregations = InternalAggregations.readAggregations(in); bounds = in.readOptionalWriteable(ExtendedBounds::new); } void writeTo(StreamOutput out) throws IOException { - Rounding.Streams.write(rounding, out); + rounding.writeTo(out); subAggregations.writeTo(out); out.writeOptionalWriteable(bounds); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java index c9ff1389f8ad3..66a29b4e05073 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java @@ -24,10 +24,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.List; public class ParsedAutoDateHistogram extends ParsedMultiBucketAggregation implements Histogram { @@ -83,7 +83,7 @@ public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBuck @Override public Object getKey() { if (key != null) { - return new DateTime(key, DateTimeZone.UTC); + return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } return null; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedDateHistogram.java index ace0cb59907a8..1cf43a53ed26c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedDateHistogram.java @@ -23,10 +23,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.List; public class ParsedDateHistogram extends ParsedMultiBucketAggregation implements Histogram { @@ -62,7 +62,7 @@ public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBuck @Override public Object getKey() { if (key != null) { - return new DateTime(key, DateTimeZone.UTC); + return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } return null; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java index b5bdba85b78ef..2b5e92ddcb3f9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java @@ -30,9 +30,9 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.internal.SearchContext; -import org.joda.time.DateTime; import java.io.IOException; +import java.time.ZonedDateTime; import java.util.Map; public class DateRangeAggregationBuilder extends AbstractRangeBuilder { @@ -224,24 +224,24 @@ public DateRangeAggregationBuilder addUnboundedFrom(double from) { * @param to * the upper bound on the dates, exclusive */ - public DateRangeAggregationBuilder addRange(String key, DateTime from, DateTime to) { + public DateRangeAggregationBuilder addRange(String key, ZonedDateTime from, ZonedDateTime to) { addRange(new RangeAggregator.Range(key, convertDateTime(from), convertDateTime(to))); return this; } - private static Double convertDateTime(DateTime dateTime) { + private static Double convertDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } else { - return (double) dateTime.getMillis(); + return (double) dateTime.toInstant().toEpochMilli(); } } /** - * Same as {@link #addRange(String, DateTime, DateTime)} but the key will be + * Same as {@link #addRange(String, ZonedDateTime, ZonedDateTime)} but the key will be * automatically generated based on from and to. */ - public DateRangeAggregationBuilder addRange(DateTime from, DateTime to) { + public DateRangeAggregationBuilder addRange(ZonedDateTime from, ZonedDateTime to) { return addRange(null, from, to); } @@ -253,16 +253,16 @@ public DateRangeAggregationBuilder addRange(DateTime from, DateTime to) { * @param to * the upper bound on the dates, exclusive */ - public DateRangeAggregationBuilder addUnboundedTo(String key, DateTime to) { + public DateRangeAggregationBuilder addUnboundedTo(String key, ZonedDateTime to) { addRange(new RangeAggregator.Range(key, null, convertDateTime(to))); return this; } /** - * Same as {@link #addUnboundedTo(String, DateTime)} but the key will be + * Same as {@link #addUnboundedTo(String, ZonedDateTime)} but the key will be * computed automatically. */ - public DateRangeAggregationBuilder addUnboundedTo(DateTime to) { + public DateRangeAggregationBuilder addUnboundedTo(ZonedDateTime to) { return addUnboundedTo(null, to); } @@ -274,16 +274,16 @@ public DateRangeAggregationBuilder addUnboundedTo(DateTime to) { * @param from * the lower bound on the distances, inclusive */ - public DateRangeAggregationBuilder addUnboundedFrom(String key, DateTime from) { + public DateRangeAggregationBuilder addUnboundedFrom(String key, ZonedDateTime from) { addRange(new RangeAggregator.Range(key, convertDateTime(from), null)); return this; } /** - * Same as {@link #addUnboundedFrom(String, DateTime)} but the key will be + * Same as {@link #addUnboundedFrom(String, ZonedDateTime)} but the key will be * computed automatically. */ - public DateRangeAggregationBuilder addUnboundedFrom(DateTime from) { + public DateRangeAggregationBuilder addUnboundedFrom(ZonedDateTime from) { return addUnboundedFrom(null, from); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java index 408c1325b85c9..a354aaeadbac0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java @@ -24,10 +24,10 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValueType; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.List; import java.util.Map; @@ -48,12 +48,14 @@ public Bucket(String key, double from, double to, long docCount, InternalAggrega @Override public Object getFrom() { - return Double.isInfinite(((Number) from).doubleValue()) ? null : new DateTime(((Number) from).longValue(), DateTimeZone.UTC); + return Double.isInfinite(((Number) from).doubleValue()) ? null : + Instant.ofEpochMilli(((Number) from).longValue()).atZone(ZoneOffset.UTC); } @Override public Object getTo() { - return Double.isInfinite(((Number) to).doubleValue()) ? null : new DateTime(((Number) to).longValue(), DateTimeZone.UTC); + return Double.isInfinite(((Number) to).doubleValue()) ? null : + Instant.ofEpochMilli(((Number) to).longValue()).atZone(ZoneOffset.UTC); } private Double internalGetFrom() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedDateRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedDateRange.java index 68adc41d23765..d4504e245541b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedDateRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedDateRange.java @@ -21,10 +21,11 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; public class ParsedDateRange extends ParsedRange { @@ -59,11 +60,11 @@ public Object getTo() { return doubleAsDateTime(to); } - private static DateTime doubleAsDateTime(Double d) { + private static ZonedDateTime doubleAsDateTime(Double d) { if (d == null || Double.isInfinite(d)) { return null; } - return new DateTime(d.longValue(), DateTimeZone.UTC); + return Instant.ofEpochMilli(d.longValue()).atZone(ZoneOffset.UTC); } static ParsedBucket fromXContent(final XContentParser parser, final boolean keyed) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java index 1a04133e82bfc..da72fec8511be 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java @@ -59,4 +59,4 @@ protected InternalSingleBucketAggregation newAggregation(String name, long docCo InternalAggregations subAggregations) { return new InternalSampler(name, docCount, subAggregations, pipelineAggregators(), metaData); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java index 7050254f2793f..b3bbe6b911c48 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java @@ -98,6 +98,10 @@ public long getEstimatedMemoryFootprint() { return state.getEstimatedFootprintInBytes(); } + DoubleHistogram getState() { + return state; + } + @Override public AbstractInternalHDRPercentiles doReduce(List aggregations, ReduceContext reduceContext) { DoubleHistogram merged = null; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java index 6e6ff3cf3a88b..cc63e5f7a4325 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java @@ -82,6 +82,10 @@ public long getEstimatedMemoryFootprint() { return state.byteSize(); } + TDigestState getState() { + return state; + } + @Override public AbstractInternalTDigestPercentiles doReduce(List aggregations, ReduceContext reduceContext) { TDigestState merged = null; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java index b3fcb33a4fb84..144777379b5ad 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java @@ -79,6 +79,10 @@ public long getValue() { return counts == null ? 0 : counts.cardinality(0); } + HyperLogLogPlusPlus getCounts() { + return counts; + } + @Override public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { InternalCardinality reduced = null; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java index 01ab3a323e12d..ace9edb13f515 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java @@ -121,7 +121,7 @@ public String getWriteableName() { return MedianAbsoluteDeviationAggregationBuilder.NAME; } - public TDigestState getValuesSketch() { + TDigestState getValuesSketch() { return valuesSketch; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java index ec2419e03ab3c..a0bb5e74fd5cb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java @@ -80,6 +80,10 @@ public Object aggregation() { return aggregation.get(0); } + List getAggregation() { + return aggregation; + } + @Override public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { List aggregationObjects = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricInspectionHelper.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricInspectionHelper.java new file mode 100644 index 0000000000000..88812cda9899a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricInspectionHelper.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.metrics; + +import org.elasticsearch.search.aggregations.pipeline.InternalDerivative; + +/** + * Counterpart to {@link org.elasticsearch.search.aggregations.support.AggregationInspectionHelper}, providing + * helpers for some aggs that have package-private getters. AggregationInspectionHelper delegates to these + * helpers when needed, and consumers should prefer to use AggregationInspectionHelper instead of these + * helpers. + */ +public class MetricInspectionHelper { + + public static boolean hasValue(InternalAvg agg) { + return agg.getCount() > 0; + } + + public static boolean hasValue(InternalCardinality agg) { + return agg.getCounts() != null; + } + + public static boolean hasValue(InternalHDRPercentileRanks agg) { + return agg.getState().getTotalCount() > 0; + } + + public static boolean hasValue(InternalHDRPercentiles agg) { + return agg.getState().getTotalCount() > 0; + } + + public static boolean hasValue(InternalMedianAbsoluteDeviation agg) { + return agg.getValuesSketch().size() > 0; + } + + public static boolean hasValue(InternalScriptedMetric agg) { + // TODO better way to know if the scripted metric received documents? + // Could check for null too, but a script might return null on purpose... + return agg.getAggregation().size() > 0 ; + } + + public static boolean hasValue(InternalTDigestPercentileRanks agg) { + return agg.getState().size() > 0; + } + + public static boolean hasValue(InternalTDigestPercentiles agg) { + return agg.getState().size() > 0; + } + + public static boolean hasValue(InternalTopHits agg) { + return (agg.getHits().getTotalHits().value == 0 + && Double.isNaN(agg.getHits().getMaxScore()) + && Double.isNaN(agg.getTopDocs().maxScore)) == false; + } + + public static boolean hasValue(InternalWeightedAvg agg) { + return (agg.getSum() == 0.0 && agg.getWeight() == 0L) == false; + } + + public static boolean hasValue(InternalDerivative agg) { + return true; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index debbacdc6196c..ba51099d6bc00 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -66,6 +67,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder> sorts = null; private HighlightBuilder highlightBuilder; @@ -85,6 +87,7 @@ protected TopHitsAggregationBuilder(TopHitsAggregationBuilder clone, this.size = clone.size; this.explain = clone.explain; this.version = clone.version; + this.seqNoAndPrimaryTerm = clone.seqNoAndPrimaryTerm; this.trackScores = clone.trackScores; this.sorts = clone.sorts == null ? null : new ArrayList<>(clone.sorts); this.highlightBuilder = clone.highlightBuilder == null ? null : @@ -137,6 +140,9 @@ public TopHitsAggregationBuilder(StreamInput in) throws IOException { } trackScores = in.readBoolean(); version = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + seqNoAndPrimaryTerm = in.readBoolean(); + } } @Override @@ -173,6 +179,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { } out.writeBoolean(trackScores); out.writeBoolean(version); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeBoolean(seqNoAndPrimaryTerm); + } } /** @@ -526,6 +535,23 @@ public boolean version() { return version; } + /** + * Should each {@link org.elasticsearch.search.SearchHit} be returned with the + * sequence number and primary term of the last modification of the document. + */ + public TopHitsAggregationBuilder seqNoAndPrimaryTerm(Boolean seqNoAndPrimaryTerm) { + this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; + return this; + } + + /** + * Indicates whether {@link org.elasticsearch.search.SearchHit}s should be returned with the + * sequence number and primary term of the last modification of the document. + */ + public Boolean seqNoAndPrimaryTerm() { + return seqNoAndPrimaryTerm; + } + /** * Applies when sorting, and controls if scores will be tracked as well. * Defaults to {@code false}. @@ -579,8 +605,9 @@ protected TopHitsAggregatorFactory doBuild(SearchContext context, AggregatorFact } else { optionalSort = SortBuilder.buildSort(sorts, context.getQueryShardContext()); } - return new TopHitsAggregatorFactory(name, from, size, explain, version, trackScores, optionalSort, highlightBuilder, - storedFieldsContext, docValueFields, fields, fetchSourceContext, context, parent, subfactoriesBuilder, metaData); + return new TopHitsAggregatorFactory(name, from, size, explain, version, seqNoAndPrimaryTerm, trackScores, optionalSort, + highlightBuilder, storedFieldsContext, docValueFields, fields, fetchSourceContext, context, parent, subfactoriesBuilder, + metaData); } @Override @@ -589,6 +616,7 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param builder.field(SearchSourceBuilder.FROM_FIELD.getPreferredName(), from); builder.field(SearchSourceBuilder.SIZE_FIELD.getPreferredName(), size); builder.field(SearchSourceBuilder.VERSION_FIELD.getPreferredName(), version); + builder.field(SearchSourceBuilder.SEQ_NO_PRIMARY_TERM_FIELD.getPreferredName(), seqNoAndPrimaryTerm); builder.field(SearchSourceBuilder.EXPLAIN_FIELD.getPreferredName(), explain); if (fetchSourceContext != null) { builder.field(SearchSourceBuilder._SOURCE_FIELD.getPreferredName(), fetchSourceContext); @@ -646,6 +674,8 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa factory.size(parser.intValue()); } else if (SearchSourceBuilder.VERSION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.version(parser.booleanValue()); + } else if (SearchSourceBuilder.SEQ_NO_PRIMARY_TERM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + factory.seqNoAndPrimaryTerm(parser.booleanValue()); } else if (SearchSourceBuilder.EXPLAIN_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.explain(parser.booleanValue()); } else if (SearchSourceBuilder.TRACK_SCORES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { @@ -745,7 +775,7 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa @Override protected int doHashCode() { return Objects.hash(explain, fetchSourceContext, docValueFields, storedFieldsContext, from, highlightBuilder, - scriptFields, size, sorts, trackScores, version); + scriptFields, size, sorts, trackScores, version, seqNoAndPrimaryTerm); } @Override @@ -761,7 +791,8 @@ protected boolean doEquals(Object obj) { && Objects.equals(size, other.size) && Objects.equals(sorts, other.sorts) && Objects.equals(trackScores, other.trackScores) - && Objects.equals(version, other.version); + && Objects.equals(version, other.version) + && Objects.equals(seqNoAndPrimaryTerm, other.seqNoAndPrimaryTerm); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java index 6086942955122..7edaccb66d4bd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java @@ -44,6 +44,7 @@ class TopHitsAggregatorFactory extends AggregatorFactory sort; private final HighlightBuilder highlightBuilder; @@ -52,8 +53,8 @@ class TopHitsAggregatorFactory extends AggregatorFactory scriptFields; private final FetchSourceContext fetchSourceContext; - TopHitsAggregatorFactory(String name, int from, int size, boolean explain, boolean version, boolean trackScores, - Optional sort, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext, + TopHitsAggregatorFactory(String name, int from, int size, boolean explain, boolean version, boolean seqNoAndPrimaryTerm, + boolean trackScores, Optional sort, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext, List docValueFields, List scriptFields, FetchSourceContext fetchSourceContext, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { @@ -62,6 +63,7 @@ class TopHitsAggregatorFactory extends AggregatorFactory aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map( (p) -> (InternalAggregation) p).collect(Collectors.toList()); - aggs.add(new InternalSimpleValue(name(), returned.doubleValue(), formatter, new ArrayList<>(), metaData())); + + InternalSimpleValue simpleValue = new InternalSimpleValue(name(), returned.doubleValue(), + formatter, new ArrayList<>(), metaData()); + aggs.add(simpleValue); InternalMultiBucketAggregation.InternalBucket newBucket = originalAgg.createBucket(new InternalAggregations(aggs), bucket); newBuckets.add(newBucket); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java index a70144b421a48..54e772b0d8e82 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java @@ -83,8 +83,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext sum += thisBucketValue; } - List aggs = StreamSupport - .stream(bucket.getAggregations().spliterator(), false) + List aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) .map((p) -> (InternalAggregation) p) .collect(Collectors.toList()); aggs.add(new InternalSimpleValue(name(), sum, formatter, new ArrayList<>(), metaData())); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java index b8785d0bf7045..68ec9085df52a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java @@ -21,9 +21,9 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -34,7 +34,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.ArrayList; @@ -139,9 +138,9 @@ protected PipelineAggregator createInternal(Map metaData) throws } Long xAxisUnits = null; if (units != null) { - DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(units); + Rounding.DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(units); if (dateTimeUnit != null) { - xAxisUnits = dateTimeUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); + xAxisUnits = dateTimeUnit.getField().getBaseUnit().getDuration().toMillis(); } else { TimeValue timeValue = TimeValue.parseTimeValue(units, null, getClass().getSimpleName() + ".unit"); if (timeValue != null) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java index 10d1cdc5a71cd..1f36c989c163d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java @@ -117,7 +117,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext // since we only change newBucket if we can add to it Bucket newBucket = bucket; - if (!(thisBucketValue == null || thisBucketValue.equals(Double.NaN))) { + if ((thisBucketValue == null || thisBucketValue.equals(Double.NaN)) == false) { // Some models (e.g. HoltWinters) have certain preconditions that must be met if (model.hasValue(values.size())) { @@ -126,7 +126,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext List aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) .map((p) -> (InternalAggregation) p) .collect(Collectors.toList()); - aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList(), metaData())); + aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList<>(), metaData())); newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs)); } @@ -153,10 +153,10 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext Bucket bucket = newBuckets.get(lastValidPosition + i + 1); // Get the existing aggs in the bucket so we don't clobber data - aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> { - return (InternalAggregation) p; - }).collect(Collectors.toList()); - aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList(), metaData())); + aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) + .map((p) -> (InternalAggregation) p) + .collect(Collectors.toList()); + aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<>(), metaData())); Bucket newBucket = factory.createBucket(newKey, bucket.getDocCount(), new InternalAggregations(aggs)); @@ -166,7 +166,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext } else { // Not seen before, create fresh aggs = new ArrayList<>(); - aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList(), metaData())); + aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<>(), metaData())); Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(aggs)); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java new file mode 100644 index 0000000000000..c41fa29bde3db --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java @@ -0,0 +1,242 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.support; + +import org.elasticsearch.search.aggregations.bucket.adjacency.InternalAdjacencyMatrix; +import org.elasticsearch.search.aggregations.bucket.composite.InternalComposite; +import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; +import org.elasticsearch.search.aggregations.bucket.filter.InternalFilters; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.bucket.missing.InternalMissing; +import org.elasticsearch.search.aggregations.bucket.nested.InternalNested; +import org.elasticsearch.search.aggregations.bucket.nested.InternalReverseNested; +import org.elasticsearch.search.aggregations.bucket.range.InternalRange; +import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler; +import org.elasticsearch.search.aggregations.bucket.sampler.UnmappedSampler; +import org.elasticsearch.search.aggregations.bucket.significant.InternalSignificantTerms; +import org.elasticsearch.search.aggregations.bucket.significant.UnmappedSignificantTerms; +import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms; +import org.elasticsearch.search.aggregations.bucket.terms.UnmappedTerms; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.search.aggregations.metrics.InternalCardinality; +import org.elasticsearch.search.aggregations.metrics.InternalExtendedStats; +import org.elasticsearch.search.aggregations.metrics.InternalGeoBounds; +import org.elasticsearch.search.aggregations.metrics.InternalGeoCentroid; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; +import org.elasticsearch.search.aggregations.metrics.InternalMax; +import org.elasticsearch.search.aggregations.metrics.InternalMedianAbsoluteDeviation; +import org.elasticsearch.search.aggregations.metrics.InternalMin; +import org.elasticsearch.search.aggregations.metrics.InternalScriptedMetric; +import org.elasticsearch.search.aggregations.metrics.InternalStats; +import org.elasticsearch.search.aggregations.metrics.InternalSum; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; +import org.elasticsearch.search.aggregations.metrics.InternalTopHits; +import org.elasticsearch.search.aggregations.metrics.InternalValueCount; +import org.elasticsearch.search.aggregations.metrics.InternalWeightedAvg; +import org.elasticsearch.search.aggregations.metrics.MetricInspectionHelper; +import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.pipeline.InternalPercentilesBucket; +import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; + +import java.util.stream.StreamSupport; + +/** + * Provides a set of static helpers to determine if a particular type of InternalAggregation "has a value" + * or not. This can be difficult to determine from an external perspective because each agg uses + * different internal bookkeeping to determine if it is empty or not (NaN, +/-Inf, 0.0, etc). + * + * This set of helpers aim to ease that task by codifying what "empty" is for each agg. + * + * It is not entirely accurate for all aggs, since some do not expose or track the needed state + * (e.g. sum doesn't record count, so it's not clear if the sum is 0.0 because it is empty + * or because of summing to zero). Pipeline aggs in particular are not well supported + * by these helpers since most share InternalSimpleValue and it's not clear which pipeline + * generated the value. + */ +public class AggregationInspectionHelper { + public static , B extends InternalTerms.Bucket> boolean hasValue(InternalTerms agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(UnmappedTerms agg) { + return false; + } + + public static boolean hasValue(UnmappedSignificantTerms agg) { + return false; + } + + public static boolean hasValue(UnmappedSampler agg) { + return false; + } + + public static boolean hasValue(InternalAdjacencyMatrix agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalFilters agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalFilter agg) { + return agg.getDocCount() > 0; + } + + public static boolean hasValue(InternalGeoHashGrid agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalGlobal agg) { + return agg.getDocCount() > 0; + } + + public static boolean hasValue(InternalHistogram agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalDateHistogram agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalAutoDateHistogram agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalComposite agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalMissing agg) { + return agg.getDocCount() > 0; + } + + public static boolean hasValue(InternalNested agg) { + return agg.getDocCount() > 0; + } + + public static boolean hasValue(InternalReverseNested agg) { + return agg.getDocCount() > 0; + } + + public static > boolean hasValue(InternalRange agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalSampler agg) { + return agg.getDocCount() > 0; + } + + public static , + B extends InternalSignificantTerms.Bucket> boolean hasValue(InternalSignificantTerms agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalAvg agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalSum agg) { + // TODO this could be incorrect... e.g. +1 + -1 + return agg.getValue() != 0.0; + } + + public static boolean hasValue(InternalCardinality agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalExtendedStats agg) { + return agg.getCount() > 0; + } + + public static boolean hasValue(InternalGeoBounds agg) { + return (agg.topLeft() == null && agg.bottomRight() == null) == false; + } + + public static boolean hasValue(InternalGeoCentroid agg) { + return agg.centroid() != null && agg.count() > 0; + } + + public static boolean hasValue(InternalHDRPercentileRanks agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalHDRPercentiles agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalMax agg) { + return agg.getValue() != Double.NEGATIVE_INFINITY; + } + + public static boolean hasValue(InternalMedianAbsoluteDeviation agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalMin agg) { + return agg.getValue() != Double.POSITIVE_INFINITY; + } + + public static boolean hasValue(InternalScriptedMetric agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalStats agg) { + return agg.getCount() > 0; + } + + public static boolean hasValue(InternalTDigestPercentileRanks agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalTDigestPercentiles agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalTopHits agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalValueCount agg) { + return agg.getValue() > 0; + } + + public static boolean hasValue(InternalWeightedAvg agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalSimpleValue agg) { + // This is a coarse approximation, since some aggs use positive/negative infinity or NaN + return (Double.isInfinite(agg.getValue()) || Double.isNaN(agg.getValue())) == false; + } + + public static boolean hasValue(InternalBucketMetricValue agg) { + return Double.isInfinite(agg.value()) == false; + } + + public static boolean hasValue(InternalPercentilesBucket agg) { + return StreamSupport.stream(agg.spliterator(), false).allMatch(p -> Double.isNaN(p.getValue())) == false; + } + +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java index fbc3081758f96..de112c427a751 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java @@ -19,19 +19,22 @@ package org.elasticsearch.search.aggregations.support; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.Objects; import java.util.function.BiFunction; @@ -39,7 +42,7 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject private String fieldName; private Object missing; private Script script; - private DateTimeZone timeZone; + private ZoneId timeZone; private static final String NAME = "field_config"; @@ -62,16 +65,16 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject if (timezoneAware) { parser.declareField(MultiValuesSourceFieldConfig.Builder::setTimeZone, p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - return DateTimeZone.forID(p.text()); + return ZoneId.of(p.text()); } else { - return DateTimeZone.forOffsetHours(p.intValue()); + return ZoneOffset.ofHours(p.intValue()); } }, ParseField.CommonFields.TIME_ZONE, ObjectParser.ValueType.LONG); } return parser; }; - private MultiValuesSourceFieldConfig(String fieldName, Object missing, Script script, DateTimeZone timeZone) { + private MultiValuesSourceFieldConfig(String fieldName, Object missing, Script script, ZoneId timeZone) { this.fieldName = fieldName; this.missing = missing; this.script = script; @@ -82,7 +85,11 @@ public MultiValuesSourceFieldConfig(StreamInput in) throws IOException { this.fieldName = in.readString(); this.missing = in.readGenericValue(); this.script = in.readOptionalWriteable(Script::new); - this.timeZone = in.readOptionalTimeZone(); + if (in.getVersion().before(Version.V_7_0_0)) { + this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); + } else { + this.timeZone = in.readOptionalZoneId(); + } } public Object getMissing() { @@ -93,7 +100,7 @@ public Script getScript() { return script; } - public DateTimeZone getTimeZone() { + public ZoneId getTimeZone() { return timeZone; } @@ -106,7 +113,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeGenericValue(missing); out.writeOptionalWriteable(script); - out.writeOptionalTimeZone(timeZone); + if (out.getVersion().before(Version.V_7_0_0)) { + out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); + } else { + out.writeOptionalZoneId(timeZone); + } } @Override @@ -122,7 +133,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(ParseField.CommonFields.FIELD.getPreferredName(), fieldName); } if (timeZone != null) { - builder.field(ParseField.CommonFields.TIME_ZONE.getPreferredName(), timeZone.getID()); + builder.field(ParseField.CommonFields.TIME_ZONE.getPreferredName(), timeZone.getId()); } builder.endObject(); return builder; @@ -153,7 +164,7 @@ public static class Builder { private String fieldName; private Object missing = null; private Script script = null; - private DateTimeZone timeZone = null; + private ZoneId timeZone = null; public String getFieldName() { return fieldName; @@ -182,11 +193,11 @@ public Builder setScript(Script script) { return this; } - public DateTimeZone getTimeZone() { + public ZoneId getTimeZone() { return timeZone; } - public Builder setTimeZone(DateTimeZone timeZone) { + public Builder setTimeZone(ZoneId timeZone) { this.timeZone = timeZone; return this; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index 25a90e581f00c..3cbd11288bffc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -28,9 +28,9 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneOffset; public enum ValueType implements Writeable { @@ -42,7 +42,7 @@ public enum ValueType implements Writeable { DOUBLE((byte) 3, "float|double", "double", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.RAW), NUMBER((byte) 4, "number", "number", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.RAW), DATE((byte) 5, "date", "date", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, - new DocValueFormat.DateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, DateTimeZone.UTC)), + new DocValueFormat.DateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, ZoneOffset.UTC)), IP((byte) 6, "ip", "ip", ValuesSourceType.BYTES, IndexFieldData.class, DocValueFormat.IP), // TODO: what is the difference between "number" and "numeric"? NUMERIC((byte) 7, "numeric", "numeric", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.RAW), diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 040cc1b542f07..d3abe6f3169ee 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -18,8 +18,10 @@ */ package org.elasticsearch.search.aggregations.support; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; @@ -28,9 +30,9 @@ import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.internal.SearchContext; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Map; import java.util.Objects; @@ -81,7 +83,7 @@ public final AB subAggregations(Builder subFactories) { private ValueType valueType = null; private String format = null; private Object missing = null; - private DateTimeZone timeZone = null; + private ZoneId timeZone = null; protected ValuesSourceConfig config; protected ValuesSourceAggregationBuilder(String name, ValuesSourceType valuesSourceType, ValueType targetValueType) { @@ -144,8 +146,10 @@ private void read(StreamInput in) throws IOException { } format = in.readOptionalString(); missing = in.readGenericValue(); - if (in.readBoolean()) { - timeZone = DateTimeZone.forID(in.readString()); + if (in.getVersion().before(Version.V_7_0_0)) { + timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); + } else { + timeZone = in.readOptionalZoneId(); } } @@ -167,10 +171,10 @@ protected final void doWriteTo(StreamOutput out) throws IOException { } out.writeOptionalString(format); out.writeGenericValue(missing); - boolean hasTimeZone = timeZone != null; - out.writeBoolean(hasTimeZone); - if (hasTimeZone) { - out.writeString(timeZone.getID()); + if (out.getVersion().before(Version.V_7_0_0)) { + out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); + } else { + out.writeOptionalZoneId(timeZone); } innerWriteTo(out); } @@ -289,7 +293,7 @@ public Object missing() { * Sets the time zone to use for this aggregation */ @SuppressWarnings("unchecked") - public AB timeZone(DateTimeZone timeZone) { + public AB timeZone(ZoneId timeZone) { if (timeZone == null) { throw new IllegalArgumentException("[timeZone] must not be null: [" + name + "]"); } @@ -300,7 +304,7 @@ public AB timeZone(DateTimeZone timeZone) { /** * Gets the time zone to use for this aggregation */ - public DateTimeZone timeZone() { + public ZoneId timeZone() { return timeZone; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index 39c53f39c7dac..82baa04fe8f1a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -32,7 +32,9 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionException; -import org.joda.time.DateTimeZone; + +import java.time.ZoneId; +import java.time.ZoneOffset; /** * A configuration that tells aggregations how to retrieve data from the index @@ -48,7 +50,7 @@ public static ValuesSourceConfig resolve( ValueType valueType, String field, Script script, Object missing, - DateTimeZone timeZone, + ZoneId timeZone, String format) { if (field == null) { @@ -121,7 +123,7 @@ private static AggregationScript.LeafFactory createScript(Script script, QuerySh } } - private static DocValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType, @Nullable DateTimeZone tz) { + private static DocValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType, @Nullable ZoneId tz) { if (valueType == null) { return DocValueFormat.RAW; // we can't figure it out } @@ -130,7 +132,7 @@ private static DocValueFormat resolveFormat(@Nullable String format, @Nullable V valueFormat = new DocValueFormat.Decimal(format); } if (valueFormat instanceof DocValueFormat.DateTime && format != null) { - valueFormat = new DocValueFormat.DateTime(DateFormatter.forPattern(format), tz != null ? tz : DateTimeZone.UTC); + valueFormat = new DocValueFormat.DateTime(DateFormatter.forPattern(format), tz != null ? tz : ZoneOffset.UTC); } return valueFormat; } @@ -142,7 +144,7 @@ private static DocValueFormat resolveFormat(@Nullable String format, @Nullable V private boolean unmapped = false; private DocValueFormat format = DocValueFormat.RAW; private Object missing; - private DateTimeZone timeZone; + private ZoneId timeZone; public ValuesSourceConfig(ValuesSourceType valueSourceType) { this.valueSourceType = valueSourceType; @@ -206,12 +208,12 @@ public Object missing() { return this.missing; } - public ValuesSourceConfig timezone(final DateTimeZone timeZone) { - this.timeZone= timeZone; + public ValuesSourceConfig timezone(final ZoneId timeZone) { + this.timeZone = timeZone; return this; } - public DateTimeZone timezone() { + public ZoneId timezone() { return this.timeZone; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java index fc0a2f3a9fefe..24bdffaa3fa89 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java @@ -25,7 +25,9 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; -import org.joda.time.DateTimeZone; + +import java.time.ZoneId; +import java.time.ZoneOffset; public final class ValuesSourceParserHelper { @@ -91,9 +93,9 @@ private static void declareFields( if (timezoneAware) { objectParser.declareField(ValuesSourceAggregationBuilder::timeZone, p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - return DateTimeZone.forID(p.text()); + return ZoneId.of(p.text()); } else { - return DateTimeZone.forOffsetHours(p.intValue()); + return ZoneOffset.ofHours(p.intValue()); } }, ParseField.CommonFields.TIME_ZONE, ObjectParser.ValueType.LONG); } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 73f6aa2fbff81..8e072d36c2c33 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -92,6 +92,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R public static final ParseField POST_FILTER_FIELD = new ParseField("post_filter"); public static final ParseField MIN_SCORE_FIELD = new ParseField("min_score"); public static final ParseField VERSION_FIELD = new ParseField("version"); + public static final ParseField SEQ_NO_PRIMARY_TERM_FIELD = new ParseField("seq_no_primary_term"); public static final ParseField EXPLAIN_FIELD = new ParseField("explain"); public static final ParseField _SOURCE_FIELD = new ParseField("_source"); public static final ParseField STORED_FIELDS_FIELD = new ParseField("stored_fields"); @@ -151,6 +152,8 @@ public static HighlightBuilder highlight() { private Boolean version; + private Boolean seqNoAndPrimaryTerm; + private List> sorts; private boolean trackScores = false; @@ -240,13 +243,18 @@ public SearchSourceBuilder(StreamInput in) throws IOException { } } if (in.readBoolean()) { - stats = in.readList(StreamInput::readString); + stats = in.readStringList(); } suggestBuilder = in.readOptionalWriteable(SuggestBuilder::new); terminateAfter = in.readVInt(); timeout = in.readOptionalTimeValue(); trackScores = in.readBoolean(); version = in.readOptionalBoolean(); + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + seqNoAndPrimaryTerm = in.readOptionalBoolean(); + } else { + seqNoAndPrimaryTerm = null; + } extBuilders = in.readNamedWriteableList(SearchExtBuilder.class); profile = in.readBoolean(); searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); @@ -303,13 +311,16 @@ public void writeTo(StreamOutput out) throws IOException { boolean hasStats = stats != null; out.writeBoolean(hasStats); if (hasStats) { - out.writeStringList(stats); + out.writeStringCollection(stats); } out.writeOptionalWriteable(suggestBuilder); out.writeVInt(terminateAfter); out.writeOptionalTimeValue(timeout); out.writeBoolean(trackScores); out.writeOptionalBoolean(version); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeOptionalBoolean(seqNoAndPrimaryTerm); + } out.writeNamedWriteableList(extBuilders); out.writeBoolean(profile); out.writeOptionalWriteable(searchAfterBuilder); @@ -441,6 +452,23 @@ public Boolean version() { return version; } + /** + * Should each {@link org.elasticsearch.search.SearchHit} be returned with the + * sequence number and primary term of the last modification of the document. + */ + public SearchSourceBuilder seqNoAndPrimaryTerm(Boolean seqNoAndPrimaryTerm) { + this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; + return this; + } + + /** + * Indicates whether {@link org.elasticsearch.search.SearchHit}s should be returned with the + * sequence number and primary term of the last modification of the document. + */ + public Boolean seqNoAndPrimaryTerm() { + return seqNoAndPrimaryTerm; + } + /** * An optional timeout to control how long search is allowed to take. */ @@ -999,6 +1027,7 @@ private SearchSourceBuilder shallowCopy(QueryBuilder queryBuilder, QueryBuilder rewrittenBuilder.trackScores = trackScores; rewrittenBuilder.trackTotalHitsUpTo = trackTotalHitsUpTo; rewrittenBuilder.version = version; + rewrittenBuilder.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; rewrittenBuilder.collapse = collapse; return rewrittenBuilder; } @@ -1038,6 +1067,8 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th minScore = parser.floatValue(); } else if (VERSION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { version = parser.booleanValue(); + } else if (SEQ_NO_PRIMARY_TERM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + seqNoAndPrimaryTerm = parser.booleanValue(); } else if (EXPLAIN_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { explain = parser.booleanValue(); } else if (TRACK_SCORES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { @@ -1205,6 +1236,10 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(VERSION_FIELD.getPreferredName(), version); } + if (seqNoAndPrimaryTerm != null) { + builder.field(SEQ_NO_PRIMARY_TERM_FIELD.getPreferredName(), seqNoAndPrimaryTerm); + } + if (explain != null) { builder.field(EXPLAIN_FIELD.getPreferredName(), explain); } @@ -1523,7 +1558,7 @@ public int hashCode() { return Objects.hash(aggregations, explain, fetchSourceContext, docValueFields, storedFieldsContext, from, highlightBuilder, indexBoosts, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, size, sorts, searchAfterBuilder, sliceBuilder, stats, suggestBuilder, terminateAfter, timeout, trackScores, version, - profile, extBuilders, collapse, trackTotalHitsUpTo); + seqNoAndPrimaryTerm, profile, extBuilders, collapse, trackTotalHitsUpTo); } @Override @@ -1558,6 +1593,7 @@ public boolean equals(Object obj) { && Objects.equals(timeout, other.timeout) && Objects.equals(trackScores, other.trackScores) && Objects.equals(version, other.version) + && Objects.equals(seqNoAndPrimaryTerm, other.seqNoAndPrimaryTerm) && Objects.equals(profile, other.profile) && Objects.equals(extBuilders, other.extBuilders) && Objects.equals(collapse, other.collapse) diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/SeqNoPrimaryTermFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/SeqNoPrimaryTermFetchSubPhase.java new file mode 100644 index 0000000000000..31a6328ff9574 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/SeqNoPrimaryTermFetchSubPhase.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.fetch.subphase; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.ReaderUtil; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; + +public final class SeqNoPrimaryTermFetchSubPhase implements FetchSubPhase { + @Override + public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { + if (context.seqNoAndPrimaryTerm() == false) { + return; + } + + hits = hits.clone(); // don't modify the incoming hits + Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); + + int lastReaderId = -1; + NumericDocValues seqNoField = null; + NumericDocValues primaryTermField = null; + for (SearchHit hit : hits) { + int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves()); + LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId); + if (lastReaderId != readerId) { + seqNoField = subReaderContext.reader().getNumericDocValues(SeqNoFieldMapper.NAME); + primaryTermField = subReaderContext.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); + lastReaderId = readerId; + } + int docId = hit.docId() - subReaderContext.docBase; + long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; + long primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM; + // we have to check the primary term field as it is only assigned for non-nested documents + if (primaryTermField != null && primaryTermField.advanceExact(docId)) { + boolean found = seqNoField.advanceExact(docId); + assert found: "found seq no for " + docId + " but not a primary term"; + seqNo = seqNoField.longValue(); + primaryTerm = primaryTermField.longValue(); + } + hit.setSeqNo(seqNo); + hit.setPrimaryTerm(primaryTerm); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 3a7fb9f823f3a..ecde28719cec6 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -422,6 +422,16 @@ public void version(boolean version) { in.version(version); } + @Override + public boolean seqNoAndPrimaryTerm() { + return in.seqNoAndPrimaryTerm(); + } + + @Override + public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + in.seqNoAndPrimaryTerm(seqNoAndPrimaryTerm); + } + @Override public int[] docIdsToLoad() { return in.docIdsToLoad(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 768143dd8fb0b..bd6d9c501c8d1 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; -import org.elasticsearch.search.collapse.CollapseContext; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; @@ -46,6 +45,7 @@ import org.elasticsearch.search.SearchExtBuilder; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.SearchContextAggregations; +import org.elasticsearch.search.collapse.CollapseContext; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -309,6 +309,12 @@ public InnerHitsContext innerHits() { public abstract void version(boolean version); + /** indicates whether the sequence number and primary term of the last modification to each hit should be returned */ + public abstract boolean seqNoAndPrimaryTerm(); + + /** controls whether the sequence number and primary term of the last modification to each hit should be returned */ + public abstract void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm); + public abstract int[] docIdsToLoad(); public abstract int docIdsToLoadFrom(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index 8c8137f5e4345..fb4d233f10ee8 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -65,6 +65,7 @@ public class SubSearchContext extends FilteredSearchContext { private boolean explain; private boolean trackScores; private boolean version; + private boolean seqNoAndPrimaryTerm; public SubSearchContext(SearchContext context) { super(context); @@ -294,6 +295,16 @@ public void version(boolean version) { this.version = version; } + @Override + public boolean seqNoAndPrimaryTerm() { + return seqNoAndPrimaryTerm; + } + + @Override + public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; + } + @Override public int[] docIdsToLoad() { return docIdsToLoad; diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index c1692f606178e..cbd4ff659e599 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; @@ -52,7 +51,7 @@ public final class SnapshotInfo implements Comparable, ToXContent, public static final String CONTEXT_MODE_PARAM = "context_mode"; public static final String CONTEXT_MODE_SNAPSHOT = "SNAPSHOT"; - private static final DateFormatter DATE_TIME_FORMATTER = DateFormatters.forPattern("strictDateOptionalTime"); + private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("strictDateOptionalTime"); private static final String SNAPSHOT = "snapshot"; private static final String UUID = "uuid"; private static final String INDICES = "indices"; diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 86ed2095433b2..4e8c26ea593d6 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -1118,14 +1118,20 @@ public void deleteSnapshot(final String repositoryName, final String snapshotNam .filter(s -> s.getName().equals(snapshotName)) .findFirst(); // if nothing found by the same name, then look in the cluster state for current in progress snapshots + long repoGenId = repositoryData.getGenId(); if (matchedEntry.isPresent() == false) { - matchedEntry = currentSnapshots(repositoryName, Collections.emptyList()).stream() - .map(e -> e.snapshot().getSnapshotId()).filter(s -> s.getName().equals(snapshotName)).findFirst(); + Optional matchedInProgress = currentSnapshots(repositoryName, Collections.emptyList()).stream() + .filter(s -> s.snapshot().getSnapshotId().getName().equals(snapshotName)).findFirst(); + if (matchedInProgress.isPresent()) { + matchedEntry = matchedInProgress.map(s -> s.snapshot().getSnapshotId()); + // Derive repository generation if a snapshot is in progress because it will increment the generation when it finishes + repoGenId = matchedInProgress.get().getRepositoryStateId() + 1L; + } } if (matchedEntry.isPresent() == false) { throw new SnapshotMissingException(repositoryName, snapshotName); } - deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repositoryData.getGenId(), immediatePriority); + deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repoGenId, immediatePriority); } /** diff --git a/server/src/main/java/org/elasticsearch/transport/CompressibleBytesOutputStream.java b/server/src/main/java/org/elasticsearch/transport/CompressibleBytesOutputStream.java index 54f4d1d0c8d84..4116f88b14224 100644 --- a/server/src/main/java/org/elasticsearch/transport/CompressibleBytesOutputStream.java +++ b/server/src/main/java/org/elasticsearch/transport/CompressibleBytesOutputStream.java @@ -39,8 +39,8 @@ * written to this stream. If compression is enabled, the proper EOS bytes will be written at that point. * The underlying {@link BytesReference} will be returned. * - * {@link CompressibleBytesOutputStream#close()} should be called when the bytes are no longer needed and - * can be safely released. + * {@link CompressibleBytesOutputStream#close()} will NOT close the underlying stream. The byte stream passed + * in the constructor must be closed individually. */ final class CompressibleBytesOutputStream extends StreamOutput { @@ -92,12 +92,9 @@ public void flush() throws IOException { @Override public void close() throws IOException { - if (stream == bytesStreamOutput) { - assert shouldCompress == false : "If the streams are the same we should not be compressing"; - IOUtils.close(stream); - } else { + if (stream != bytesStreamOutput) { assert shouldCompress : "If the streams are different we should be compressing"; - IOUtils.close(stream, bytesStreamOutput); + IOUtils.close(stream); } } diff --git a/server/src/main/java/org/elasticsearch/transport/InboundMessage.java b/server/src/main/java/org/elasticsearch/transport/InboundMessage.java new file mode 100644 index 0000000000000..44e3b017ed27e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/InboundMessage.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.Compressor; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.compress.NotCompressedException; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.internal.io.IOUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.Set; +import java.util.TreeSet; + +public abstract class InboundMessage extends NetworkMessage implements Closeable { + + private final StreamInput streamInput; + + InboundMessage(ThreadContext threadContext, Version version, byte status, long requestId, StreamInput streamInput) { + super(threadContext, version, status, requestId); + this.streamInput = streamInput; + } + + StreamInput getStreamInput() { + return streamInput; + } + + static class Reader { + + private final Version version; + private final NamedWriteableRegistry namedWriteableRegistry; + private final ThreadContext threadContext; + + Reader(Version version, NamedWriteableRegistry namedWriteableRegistry, ThreadContext threadContext) { + this.version = version; + this.namedWriteableRegistry = namedWriteableRegistry; + this.threadContext = threadContext; + } + + InboundMessage deserialize(BytesReference reference) throws IOException { + int messageLengthBytes = reference.length(); + final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; + // we have additional bytes to read, outside of the header + boolean hasMessageBytesToRead = (totalMessageSize - TcpHeader.HEADER_SIZE) > 0; + StreamInput streamInput = reference.streamInput(); + boolean success = false; + try (ThreadContext.StoredContext existing = threadContext.stashContext()) { + long requestId = streamInput.readLong(); + byte status = streamInput.readByte(); + Version remoteVersion = Version.fromId(streamInput.readInt()); + final boolean isHandshake = TransportStatus.isHandshake(status); + ensureVersionCompatibility(remoteVersion, version, isHandshake); + if (TransportStatus.isCompress(status) && hasMessageBytesToRead && streamInput.available() > 0) { + Compressor compressor; + try { + final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; + compressor = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)); + } catch (NotCompressedException ex) { + int maxToRead = Math.min(reference.length(), 10); + StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [") + .append(maxToRead).append("] content bytes out of [").append(reference.length()) + .append("] readable bytes with message size [").append(messageLengthBytes).append("] ").append("] are ["); + for (int i = 0; i < maxToRead; i++) { + sb.append(reference.get(i)).append(","); + } + sb.append("]"); + throw new IllegalStateException(sb.toString()); + } + streamInput = compressor.streamInput(streamInput); + } + streamInput = new NamedWriteableAwareStreamInput(streamInput, namedWriteableRegistry); + streamInput.setVersion(remoteVersion); + + threadContext.readHeaders(streamInput); + + InboundMessage message; + if (TransportStatus.isRequest(status)) { + final Set features; + if (remoteVersion.onOrAfter(Version.V_6_3_0)) { + features = Collections.unmodifiableSet(new TreeSet<>(Arrays.asList(streamInput.readStringArray()))); + } else { + features = Collections.emptySet(); + } + final String action = streamInput.readString(); + message = new RequestMessage(threadContext, remoteVersion, status, requestId, action, features, streamInput); + } else { + message = new ResponseMessage(threadContext, remoteVersion, status, requestId, streamInput); + } + success = true; + return message; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(streamInput); + } + } + } + } + + @Override + public void close() throws IOException { + streamInput.close(); + } + + private static void ensureVersionCompatibility(Version version, Version currentVersion, boolean isHandshake) { + // for handshakes we are compatible with N-2 since otherwise we can't figure out our initial version + // since we are compatible with N-1 and N+1 so we always send our minCompatVersion as the initial version in the + // handshake. This looks odd but it's required to establish the connection correctly we check for real compatibility + // once the connection is established + final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion; + if (version.isCompatible(compatibilityVersion) == false) { + final Version minCompatibilityVersion = isHandshake ? compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion(); + String msg = "Received " + (isHandshake ? "handshake " : "") + "message from unsupported version: ["; + throw new IllegalStateException(msg + version + "] minimal compatible version is: [" + minCompatibilityVersion + "]"); + } + } + + public static class RequestMessage extends InboundMessage { + + private final String actionName; + private final Set features; + + RequestMessage(ThreadContext threadContext, Version version, byte status, long requestId, String actionName, Set features, + StreamInput streamInput) { + super(threadContext, version, status, requestId, streamInput); + this.actionName = actionName; + this.features = features; + } + + String getActionName() { + return actionName; + } + + Set getFeatures() { + return features; + } + } + + public static class ResponseMessage extends InboundMessage { + + ResponseMessage(ThreadContext threadContext, Version version, byte status, long requestId, StreamInput streamInput) { + super(threadContext, version, status, requestId, streamInput); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java new file mode 100644 index 0000000000000..7d8dbb8a0f1b6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +/** + * Represents a transport message sent over the network. Subclasses implement serialization and + * deserialization. + */ +public abstract class NetworkMessage { + + protected final Version version; + protected final ThreadContext threadContext; + protected final ThreadContext.StoredContext storedContext; + protected final long requestId; + protected final byte status; + + NetworkMessage(ThreadContext threadContext, Version version, byte status, long requestId) { + this.threadContext = threadContext; + storedContext = threadContext.stashContext(); + storedContext.restore(); + this.version = version; + this.requestId = requestId; + this.status = status; + } + + public Version getVersion() { + return version; + } + + public long getRequestId() { + return requestId; + } + + boolean isCompress() { + return TransportStatus.isCompress(status); + } + + ThreadContext.StoredContext getStoredContext() { + return storedContext; + } + + boolean isResponse() { + return TransportStatus.isRequest(status) == false; + } + + boolean isRequest() { + return TransportStatus.isRequest(status); + } + + boolean isHandshake() { + return TransportStatus.isHandshake(status); + } + + boolean isError() { + return TransportStatus.isError(status); + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java new file mode 100644 index 0000000000000..31c67d930c538 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java @@ -0,0 +1,167 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.NotifyOnceListener; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.network.CloseableChannel; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; + +final class OutboundHandler { + + private static final Logger logger = LogManager.getLogger(OutboundHandler.class); + + private final MeanMetric transmittedBytesMetric = new MeanMetric(); + private final ThreadPool threadPool; + private final BigArrays bigArrays; + private final TransportLogger transportLogger; + + OutboundHandler(ThreadPool threadPool, BigArrays bigArrays, TransportLogger transportLogger) { + this.threadPool = threadPool; + this.bigArrays = bigArrays; + this.transportLogger = transportLogger; + } + + void sendBytes(TcpChannel channel, BytesReference bytes, ActionListener listener) { + channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); + SendContext sendContext = new SendContext(channel, () -> bytes, listener); + try { + internalSendMessage(channel, sendContext); + } catch (IOException e) { + // This should not happen as the bytes are already serialized + throw new AssertionError(e); + } + } + + void sendMessage(TcpChannel channel, OutboundMessage networkMessage, ActionListener listener) throws IOException { + channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); + MessageSerializer serializer = new MessageSerializer(networkMessage, bigArrays); + SendContext sendContext = new SendContext(channel, serializer, listener, serializer); + internalSendMessage(channel, sendContext); + } + + /** + * sends a message to the given channel, using the given callbacks. + */ + private void internalSendMessage(TcpChannel channel, SendContext sendContext) throws IOException { + channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); + BytesReference reference = sendContext.get(); + try { + channel.sendMessage(reference, sendContext); + } catch (RuntimeException ex) { + sendContext.onFailure(ex); + CloseableChannel.closeChannel(channel); + throw ex; + } + + } + + MeanMetric getTransmittedBytes() { + return transmittedBytesMetric; + } + + private static class MessageSerializer implements CheckedSupplier, Releasable { + + private final OutboundMessage message; + private final BigArrays bigArrays; + private volatile ReleasableBytesStreamOutput bytesStreamOutput; + + private MessageSerializer(OutboundMessage message, BigArrays bigArrays) { + this.message = message; + this.bigArrays = bigArrays; + } + + @Override + public BytesReference get() throws IOException { + bytesStreamOutput = new ReleasableBytesStreamOutput(bigArrays); + return message.serialize(bytesStreamOutput); + } + + @Override + public void close() { + IOUtils.closeWhileHandlingException(bytesStreamOutput); + } + } + + private class SendContext extends NotifyOnceListener implements CheckedSupplier { + + private final TcpChannel channel; + private final CheckedSupplier messageSupplier; + private final ActionListener listener; + private final Releasable optionalReleasable; + private long messageSize = -1; + + private SendContext(TcpChannel channel, CheckedSupplier messageSupplier, + ActionListener listener) { + this(channel, messageSupplier, listener, null); + } + + private SendContext(TcpChannel channel, CheckedSupplier messageSupplier, + ActionListener listener, Releasable optionalReleasable) { + this.channel = channel; + this.messageSupplier = messageSupplier; + this.listener = listener; + this.optionalReleasable = optionalReleasable; + } + + public BytesReference get() throws IOException { + BytesReference message; + try { + message = messageSupplier.get(); + messageSize = message.length(); + transportLogger.logOutboundMessage(channel, message); + return message; + } catch (Exception e) { + onFailure(e); + throw e; + } + } + + @Override + protected void innerOnResponse(Void v) { + assert messageSize != -1 : "If onResponse is being called, the message should have been serialized"; + transmittedBytesMetric.inc(messageSize); + closeAndCallback(() -> listener.onResponse(v)); + } + + @Override + protected void innerOnFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("send message failed [channel: {}]", channel), e); + closeAndCallback(() -> listener.onFailure(e)); + } + + private void closeAndCallback(Runnable runnable) { + Releasables.close(optionalReleasable, runnable::run); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java new file mode 100644 index 0000000000000..cc295a68df3c7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.io.IOException; +import java.util.Set; + +abstract class OutboundMessage extends NetworkMessage implements Writeable { + + private final Writeable message; + + OutboundMessage(ThreadContext threadContext, Version version, byte status, long requestId, Writeable message) { + super(threadContext, version, status, requestId); + this.message = message; + } + + BytesReference serialize(BytesStreamOutput bytesStream) throws IOException { + storedContext.restore(); + bytesStream.setVersion(version); + bytesStream.skip(TcpHeader.HEADER_SIZE); + + // The compressible bytes stream will not close the underlying bytes stream + BytesReference reference; + try (CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bytesStream, TransportStatus.isCompress(status))) { + stream.setVersion(version); + threadContext.writeTo(stream); + writeTo(stream); + reference = writeMessage(stream); + } + bytesStream.seek(0); + TcpHeader.writeHeader(bytesStream, requestId, status, version, reference.length() - TcpHeader.HEADER_SIZE); + return reference; + } + + private BytesReference writeMessage(CompressibleBytesOutputStream stream) throws IOException { + final BytesReference zeroCopyBuffer; + if (message instanceof BytesTransportRequest) { + BytesTransportRequest bRequest = (BytesTransportRequest) message; + bRequest.writeThin(stream); + zeroCopyBuffer = bRequest.bytes; + } else if (message instanceof RemoteTransportException) { + stream.writeException((RemoteTransportException) message); + zeroCopyBuffer = BytesArray.EMPTY; + } else { + message.writeTo(stream); + zeroCopyBuffer = BytesArray.EMPTY; + } + // we have to call materializeBytes() here before accessing the bytes. A CompressibleBytesOutputStream + // might be implementing compression. And materializeBytes() ensures that some marker bytes (EOS marker) + // are written. Otherwise we barf on the decompressing end when we read past EOF on purpose in the + // #validateRequest method. this might be a problem in deflate after all but it's important to write + // the marker bytes. + final BytesReference message = stream.materializeBytes(); + if (zeroCopyBuffer.length() == 0) { + return message; + } else { + return new CompositeBytesReference(message, zeroCopyBuffer); + } + } + + static class Request extends OutboundMessage { + + private final String[] features; + private final String action; + + Request(ThreadContext threadContext, String[] features, Writeable message, Version version, String action, long requestId, + boolean isHandshake, boolean compress) { + super(threadContext, version, setStatus(compress, isHandshake, message), requestId, message); + this.features = features; + this.action = action; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (version.onOrAfter(Version.V_6_3_0)) { + out.writeStringArray(features); + } + out.writeString(action); + } + + private static byte setStatus(boolean compress, boolean isHandshake, Writeable message) { + byte status = 0; + status = TransportStatus.setRequest(status); + if (compress && OutboundMessage.canCompress(message)) { + status = TransportStatus.setCompress(status); + } + if (isHandshake) { + status = TransportStatus.setHandshake(status); + } + + return status; + } + } + + static class Response extends OutboundMessage { + + private final Set features; + + Response(ThreadContext threadContext, Set features, Writeable message, Version version, long requestId, + boolean isHandshake, boolean compress) { + super(threadContext, version, setStatus(compress, isHandshake, message), requestId, message); + this.features = features; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.setFeatures(features); + } + + private static byte setStatus(boolean compress, boolean isHandshake, Writeable message) { + byte status = 0; + status = TransportStatus.setResponse(status); + if (message instanceof RemoteTransportException) { + status = TransportStatus.setError(status); + } + if (compress) { + status = TransportStatus.setCompress(status); + } + if (isHandshake) { + status = TransportStatus.setHandshake(status); + } + + return status; + } + } + + private static boolean canCompress(Writeable message) { + return message instanceof BytesTransportRequest == false; + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 7ea55925262ff..d7e3de92e4028 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -25,9 +25,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -62,7 +59,6 @@ import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; @@ -172,6 +168,13 @@ void updateSkipUnavailable(boolean skipUnavailable) { this.skipUnavailable = skipUnavailable; } + /** + * Returns whether this cluster is configured to be skipped when unavailable + */ + boolean isSkipUnavailable() { + return skipUnavailable; + } + @Override public void onNodeDisconnected(DiscoveryNode node) { boolean remove = connectedNodes.remove(node); @@ -181,31 +184,11 @@ public void onNodeDisconnected(DiscoveryNode node) { } } - /** - * Fetches all shards for the search request from this remote connection. This is used to later run the search on the remote end. - */ - public void fetchSearchShards(ClusterSearchShardsRequest searchRequest, - ActionListener listener) { - - final ActionListener searchShardsListener; - final Consumer onConnectFailure; - if (skipUnavailable) { - onConnectFailure = (exception) -> listener.onResponse(ClusterSearchShardsResponse.EMPTY); - searchShardsListener = ActionListener.wrap(listener::onResponse, (e) -> listener.onResponse(ClusterSearchShardsResponse.EMPTY)); - } else { - onConnectFailure = listener::onFailure; - searchShardsListener = listener; - } - // in case we have no connected nodes we try to connect and if we fail we either notify the listener or not depending on - // the skip_unavailable setting - ensureConnected(ActionListener.wrap((x) -> fetchShardsInternal(searchRequest, searchShardsListener), onConnectFailure)); - } - /** * Ensures that this cluster is connected. If the cluster is connected this operation * will invoke the listener immediately. */ - public void ensureConnected(ActionListener voidActionListener) { + void ensureConnected(ActionListener voidActionListener) { if (connectedNodes.size() == 0) { connectHandler.connect(voidActionListener); } else { @@ -213,35 +196,6 @@ public void ensureConnected(ActionListener voidActionListener) { } } - private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest, - final ActionListener listener) { - final DiscoveryNode node = getAnyConnectedNode(); - Transport.Connection connection = connectionManager.getConnection(node); - transportService.sendRequest(connection, ClusterSearchShardsAction.NAME, searchShardsRequest, TransportRequestOptions.EMPTY, - new TransportResponseHandler() { - - @Override - public ClusterSearchShardsResponse read(StreamInput in) throws IOException { - return new ClusterSearchShardsResponse(in); - } - - @Override - public void handleResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { - listener.onResponse(clusterSearchShardsResponse); - } - - @Override - public void handleException(TransportException e) { - listener.onFailure(e); - } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } - }); - } - /** * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link DiscoveryNode} lookup function * that returns null if the node ID is not found. diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index d9fcb01df4ce8..7d19b2eebcb1d 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -24,8 +24,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; @@ -50,10 +48,8 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.Predicate; @@ -287,7 +283,7 @@ public Map groupIndices(IndicesOptions indicesOptions, String clusterAlias = entry.getKey(); List originalIndices = entry.getValue(); originalIndicesMap.put(clusterAlias, - new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), indicesOptions)); + new OriginalIndices(originalIndices.toArray(new String[0]), indicesOptions)); } } } else { @@ -311,55 +307,6 @@ public Set getRegisteredRemoteClusterNames() { return remoteClusters.keySet(); } - public void collectSearchShards(IndicesOptions indicesOptions, String preference, String routing, - Map remoteIndicesByCluster, - ActionListener> listener) { - final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); - final Map searchShardsResponses = new ConcurrentHashMap<>(); - final AtomicReference transportException = new AtomicReference<>(); - for (Map.Entry entry : remoteIndicesByCluster.entrySet()) { - final String clusterName = entry.getKey(); - RemoteClusterConnection remoteClusterConnection = remoteClusters.get(clusterName); - if (remoteClusterConnection == null) { - throw new IllegalArgumentException("no such remote cluster: " + clusterName); - } - final String[] indices = entry.getValue().indices(); - ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices) - .indicesOptions(indicesOptions).local(true).preference(preference) - .routing(routing); - remoteClusterConnection.fetchSearchShards(searchShardsRequest, - new ActionListener() { - @Override - public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { - searchShardsResponses.put(clusterName, clusterSearchShardsResponse); - if (responsesCountDown.countDown()) { - RemoteTransportException exception = transportException.get(); - if (exception == null) { - listener.onResponse(searchShardsResponses); - } else { - listener.onFailure(transportException.get()); - } - } - } - - @Override - public void onFailure(Exception e) { - RemoteTransportException exception = - new RemoteTransportException("error while communicating with remote cluster [" + clusterName + "]", e); - if (transportException.compareAndSet(null, exception) == false) { - exception = transportException.accumulateAndGet(exception, (previous, current) -> { - current.addSuppressed(previous); - return current; - }); - } - if (responsesCountDown.countDown()) { - listener.onFailure(exception); - } - } - }); - } - } - /** * Returns a connection to the given node on the given remote cluster * @throws IllegalArgumentException if the remote cluster is unknown @@ -376,6 +323,13 @@ void ensureConnected(String clusterAlias, ActionListener listener) { getRemoteClusterConnection(clusterAlias).ensureConnected(listener); } + /** + * Returns whether the cluster identified by the provided alias is configured to be skipped when unavailable + */ + public boolean isSkipUnavailable(String clusterAlias) { + return getRemoteClusterConnection(clusterAlias).isSkipUnavailable(); + } + public Transport.Connection getConnection(String cluster) { return getRemoteClusterConnection(cluster).getConnection(); } @@ -399,7 +353,7 @@ public void listenForUpdates(ClusterSettings clusterSettings) { clusterSettings.addAffixUpdateConsumer(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {}); } - synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) { + private synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) { RemoteClusterConnection remote = this.remoteClusters.get(clusterAlias); if (remote != null) { remote.updateSkipUnavailable(skipUnavailable); @@ -510,5 +464,4 @@ public Client getRemoteClusterClient(ThreadPool threadPool, String clusterAlias) Collection getConnections() { return remoteClusters.values(); } - } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index dd6a21acc8fbd..e0b466e2ec631 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -26,24 +26,16 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.NotifyOnceListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.CompositeBytesReference; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.common.compress.Compressor; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.compress.NotCompressedException; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.metrics.MeanMetric; @@ -64,17 +56,14 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; -import java.io.Closeable; import java.io.IOException; import java.io.StreamCorruptedException; -import java.io.UncheckedIOException; import java.net.BindException; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -136,8 +125,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final Map> serverChannels = newConcurrentMap(); private final Set acceptedChannels = ConcurrentCollections.newConcurrentSet(); - private final NamedWriteableRegistry namedWriteableRegistry; - // this lock is here to make sure we close this transport and disconnect all the client nodes // connections while no connect operations is going on private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); @@ -145,15 +132,16 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final String transportName; private final MeanMetric readBytesMetric = new MeanMetric(); - private final MeanMetric transmittedBytesMetric = new MeanMetric(); private volatile Map> requestHandlers = Collections.emptyMap(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); private final TransportLogger transportLogger; private final TransportHandshaker handshaker; private final TransportKeepAlive keepAlive; + private final InboundMessage.Reader reader; + private final OutboundHandler outboundHandler; private final String nodeName; - public TcpTransport(String transportName, Settings settings, Version version, ThreadPool threadPool, + public TcpTransport(String transportName, Settings settings, Version version, ThreadPool threadPool, PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { this.settings = settings; @@ -163,17 +151,18 @@ public TcpTransport(String transportName, Settings settings, Version version, T this.bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.IN_FLIGHT_REQUESTS); this.pageCacheRecycler = pageCacheRecycler; this.circuitBreakerService = circuitBreakerService; - this.namedWriteableRegistry = namedWriteableRegistry; this.networkService = networkService; this.transportName = transportName; this.transportLogger = new TransportLogger(); + this.outboundHandler = new OutboundHandler(threadPool, bigArrays, transportLogger); this.handshaker = new TransportHandshaker(version, threadPool, (node, channel, requestId, v) -> sendRequestToChannel(node, channel, requestId, TransportHandshaker.HANDSHAKE_ACTION_NAME, new TransportHandshaker.HandshakeRequest(version), - TransportRequestOptions.EMPTY, v, false, TransportStatus.setHandshake((byte) 0)), + TransportRequestOptions.EMPTY, v, false, true), (v, features, channel, response, requestId) -> sendResponse(v, features, channel, response, requestId, - TransportHandshaker.HANDSHAKE_ACTION_NAME, false, TransportStatus.setHandshake((byte) 0))); - this.keepAlive = new TransportKeepAlive(threadPool, this::internalSendMessage); + TransportHandshaker.HANDSHAKE_ACTION_NAME, false, true)); + this.keepAlive = new TransportKeepAlive(threadPool, this.outboundHandler::sendBytes); + this.reader = new InboundMessage.Reader(version, namedWriteableRegistry, threadPool.getThreadContext()); this.nodeName = Node.NODE_NAME_SETTING.get(settings); final Settings defaultFeatures = TransportSettings.DEFAULT_FEATURES_SETTING.get(settings); @@ -280,7 +269,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, throw new NodeNotConnectedException(node, "connection already closed"); } TcpChannel channel = channel(options.type()); - sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), compress, (byte) 0); + sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), compress); } } @@ -573,7 +562,8 @@ protected final void doStop() { for (Map.Entry> entry : serverChannels.entrySet()) { String profile = entry.getKey(); List channels = entry.getValue(); - ActionListener closeFailLogger = ActionListener.wrap(c -> {}, + ActionListener closeFailLogger = ActionListener.wrap(c -> { + }, e -> logger.warn(() -> new ParameterizedMessage("Error closing serverChannel for profile [{}]", profile), e)); channels.forEach(c -> c.addCloseListener(closeFailLogger)); CloseableChannel.closeChannels(channels, true); @@ -628,26 +618,7 @@ public void onException(TcpChannel channel, Exception e) { // in case we are able to return data, serialize the exception content and sent it back to the client if (channel.isOpen()) { BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)); - ActionListener listener = new ActionListener() { - @Override - public void onResponse(Void aVoid) { - CloseableChannel.closeChannel(channel); - } - - @Override - public void onFailure(Exception e) { - logger.debug("failed to send message to httpOnTransport channel", e); - CloseableChannel.closeChannel(channel); - } - }; - // We do not call internalSendMessage because we are not sending a message that is an - // elasticsearch binary message. We are just serializing an exception here. Not formatting it - // as an elasticsearch transport message. - try { - channel.sendMessage(message, new SendListener(channel, message.length(), listener)); - } catch (Exception ex) { - listener.onFailure(ex); - } + outboundHandler.sendBytes(channel, message, ActionListener.wrap(() -> CloseableChannel.closeChannel(channel))); } } else { logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); @@ -660,16 +631,6 @@ protected void onServerException(TcpServerChannel channel, Exception e) { logger.error(new ParameterizedMessage("exception from server channel caught on transport layer [channel={}]", channel), e); } - /** - * Exception handler for exceptions that are not associated with a specific channel. - * - * @param exception the exception - */ - protected void onNonChannelException(Exception exception) { - logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), - exception); - } - protected void serverAcceptedChannel(TcpChannel channel) { boolean addedOnThisCall = acceptedChannels.add(channel); assert addedOnThisCall : "Channel should only be added to accepted channel set once"; @@ -701,65 +662,21 @@ protected void serverAcceptedChannel(TcpChannel channel) { */ protected abstract void stopInternal(); - private boolean canCompress(TransportRequest request) { - return request instanceof BytesTransportRequest == false; - } - private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options, Version channelVersion, - boolean compressRequest, byte status) throws IOException, TransportException { - - // only compress if asked and the request is not bytes. Otherwise only - // the header part is compressed, and the "body" can't be extracted as compressed - final boolean compressMessage = compressRequest && canCompress(request); - - status = TransportStatus.setRequest(status); - ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); - final CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bStream, compressMessage); - boolean addedReleaseListener = false; - try { - if (compressMessage) { - status = TransportStatus.setCompress(status); - } - - // we pick the smallest of the 2, to support both backward and forward compatibility - // note, this is the only place we need to do this, since from here on, we use the serialized version - // as the version to use also when the node receiving this request will send the response with - Version version = Version.min(this.version, channelVersion); - - stream.setVersion(version); - threadPool.getThreadContext().writeTo(stream); - if (version.onOrAfter(Version.V_6_3_0)) { - stream.writeStringArray(features); - } - stream.writeString(action); - BytesReference message = buildMessage(requestId, status, node.getVersion(), request, stream); - final TransportRequestOptions finalOptions = options; - // this might be called in a different thread - ReleaseListener releaseListener = new ReleaseListener(stream, - () -> messageListener.onRequestSent(node, requestId, action, request, finalOptions)); - internalSendMessage(channel, message, releaseListener); - addedReleaseListener = true; - } finally { - if (!addedReleaseListener) { - IOUtils.close(stream); - } - } + boolean compressRequest) throws IOException, TransportException { + sendRequestToChannel(node, channel, requestId, action, request, options, channelVersion, compressRequest, false); } - /** - * sends a message to the given channel, using the given callbacks. - */ - private void internalSendMessage(TcpChannel channel, BytesReference message, ActionListener listener) { - channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); - transportLogger.logOutboundMessage(channel, message); - try { - channel.sendMessage(message, new SendListener(channel, message.length(), listener)); - } catch (Exception ex) { - // call listener to ensure that any resources are released - listener.onFailure(ex); - onException(channel, ex); - } + private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action, + final TransportRequest request, TransportRequestOptions options, Version channelVersion, + boolean compressRequest, boolean isHandshake) throws IOException, TransportException { + Version version = Version.min(this.version, channelVersion); + OutboundMessage.Request message = new OutboundMessage.Request(threadPool.getThreadContext(), features, request, version, action, + requestId, isHandshake, compressRequest); + ActionListener listener = ActionListener.wrap(() -> + messageListener.onRequestSent(node, requestId, action, request, options)); + outboundHandler.sendMessage(channel, message, listener); } /** @@ -779,23 +696,13 @@ public void sendErrorResponse( final Exception error, final long requestId, final String action) throws IOException { - try (BytesStreamOutput stream = new BytesStreamOutput()) { - stream.setVersion(nodeVersion); - stream.setFeatures(features); - RemoteTransportException tx = new RemoteTransportException( - nodeName, new TransportAddress(channel.getLocalAddress()), action, error); - threadPool.getThreadContext().writeTo(stream); - stream.writeException(tx); - byte status = 0; - status = TransportStatus.setResponse(status); - status = TransportStatus.setError(status); - final BytesReference bytes = stream.bytes(); - final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); - CompositeBytesReference message = new CompositeBytesReference(header, bytes); - ReleaseListener releaseListener = new ReleaseListener(null, - () -> messageListener.onResponseSent(requestId, action, error)); - internalSendMessage(channel, message, releaseListener); - } + Version version = Version.min(this.version, nodeVersion); + TransportAddress address = new TransportAddress(channel.getLocalAddress()); + RemoteTransportException tx = new RemoteTransportException(nodeName, address, action, error); + OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), features, tx, version, requestId, + false, false); + ActionListener listener = ActionListener.wrap(() -> messageListener.onResponseSent(requestId, action, error)); + outboundHandler.sendMessage(channel, message, listener); } /** @@ -811,7 +718,7 @@ public void sendResponse( final long requestId, final String action, final boolean compress) throws IOException { - sendResponse(nodeVersion, features, channel, response, requestId, action, compress, (byte) 0); + sendResponse(nodeVersion, features, channel, response, requestId, action, compress, false); } private void sendResponse( @@ -822,82 +729,18 @@ private void sendResponse( final long requestId, final String action, boolean compress, - byte status) throws IOException { - - status = TransportStatus.setResponse(status); - ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); - CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bStream, compress); - boolean addedReleaseListener = false; - try { - if (compress) { - status = TransportStatus.setCompress(status); - } - threadPool.getThreadContext().writeTo(stream); - stream.setVersion(nodeVersion); - stream.setFeatures(features); - BytesReference message = buildMessage(requestId, status, nodeVersion, response, stream); - - // this might be called in a different thread - ReleaseListener releaseListener = new ReleaseListener(stream, - () -> messageListener.onResponseSent(requestId, action, response)); - internalSendMessage(channel, message, releaseListener); - addedReleaseListener = true; - } finally { - if (!addedReleaseListener) { - IOUtils.close(stream); - } - } - } - - /** - * Writes the Tcp message header into a bytes reference. - * - * @param requestId the request ID - * @param status the request status - * @param protocolVersion the protocol version used to serialize the data in the message - * @param length the payload length in bytes - * @see TcpHeader - */ - private BytesReference buildHeader(long requestId, byte status, Version protocolVersion, int length) throws IOException { - try (BytesStreamOutput headerOutput = new BytesStreamOutput(TcpHeader.HEADER_SIZE)) { - headerOutput.setVersion(protocolVersion); - TcpHeader.writeHeader(headerOutput, requestId, status, protocolVersion, length); - final BytesReference bytes = headerOutput.bytes(); - assert bytes.length() == TcpHeader.HEADER_SIZE : "header size mismatch expected: " + TcpHeader.HEADER_SIZE + " but was: " - + bytes.length(); - return bytes; - } - } - - /** - * Serializes the given message into a bytes representation - */ - private BytesReference buildMessage(long requestId, byte status, Version nodeVersion, TransportMessage message, - CompressibleBytesOutputStream stream) throws IOException { - final BytesReference zeroCopyBuffer; - if (message instanceof BytesTransportRequest) { // what a shitty optimization - we should use a direct send method instead - BytesTransportRequest bRequest = (BytesTransportRequest) message; - assert nodeVersion.equals(bRequest.version()); - bRequest.writeThin(stream); - zeroCopyBuffer = bRequest.bytes; - } else { - message.writeTo(stream); - zeroCopyBuffer = BytesArray.EMPTY; - } - // we have to call materializeBytes() here before accessing the bytes. A CompressibleBytesOutputStream - // might be implementing compression. And materializeBytes() ensures that some marker bytes (EOS marker) - // are written. Otherwise we barf on the decompressing end when we read past EOF on purpose in the - // #validateRequest method. this might be a problem in deflate after all but it's important to write - // the marker bytes. - final BytesReference messageBody = stream.materializeBytes(); - final BytesReference header = buildHeader(requestId, status, stream.getVersion(), messageBody.length() + zeroCopyBuffer.length()); - return new CompositeBytesReference(header, messageBody, zeroCopyBuffer); + boolean isHandshake) throws IOException { + Version version = Version.min(this.version, nodeVersion); + OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), features, response, version, + requestId, isHandshake, compress); + ActionListener listener = ActionListener.wrap(() -> messageListener.onResponseSent(requestId, action, response)); + outboundHandler.sendMessage(channel, message, listener); } /** * Handles inbound message that has been decoded. * - * @param channel the channel the message if fomr + * @param channel the channel the message is from * @param message the message */ public void inboundMessage(TcpChannel channel, BytesReference message) { @@ -1065,53 +908,26 @@ public HttpOnTransportException(StreamInput in) throws IOException { * This method handles the message receive part for both request and responses */ public final void messageReceived(BytesReference reference, TcpChannel channel) throws IOException { - String profileName = channel.getProfile(); + readBytesMetric.inc(reference.length() + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE); InetSocketAddress remoteAddress = channel.getRemoteAddress(); - int messageLengthBytes = reference.length(); - final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; - readBytesMetric.inc(totalMessageSize); - // we have additional bytes to read, outside of the header - boolean hasMessageBytesToRead = (totalMessageSize - TcpHeader.HEADER_SIZE) > 0; - StreamInput streamIn = reference.streamInput(); - boolean success = false; - try (ThreadContext.StoredContext tCtx = threadPool.getThreadContext().stashContext()) { - long requestId = streamIn.readLong(); - byte status = streamIn.readByte(); - Version version = Version.fromId(streamIn.readInt()); - if (TransportStatus.isCompress(status) && hasMessageBytesToRead && streamIn.available() > 0) { - Compressor compressor; - try { - final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; - compressor = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)); - } catch (NotCompressedException ex) { - int maxToRead = Math.min(reference.length(), 10); - StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead) - .append("] content bytes out of [").append(reference.length()) - .append("] readable bytes with message size [").append(messageLengthBytes).append("] ").append("] are ["); - for (int i = 0; i < maxToRead; i++) { - sb.append(reference.get(i)).append(","); - } - sb.append("]"); - throw new IllegalStateException(sb.toString()); - } - streamIn = compressor.streamInput(streamIn); - } - final boolean isHandshake = TransportStatus.isHandshake(status); - ensureVersionCompatibility(version, this.version, isHandshake); - streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry); - streamIn.setVersion(version); - threadPool.getThreadContext().readHeaders(streamIn); - threadPool.getThreadContext().putTransient("_remote_address", remoteAddress); - if (TransportStatus.isRequest(status)) { - handleRequest(channel, profileName, streamIn, requestId, messageLengthBytes, version, remoteAddress, status); + + ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext existing = threadContext.stashContext(); + InboundMessage message = reader.deserialize(reference)) { + // Place the context with the headers from the message + message.getStoredContext().restore(); + threadContext.putTransient("_remote_address", remoteAddress); + if (message.isRequest()) { + handleRequest(channel, (InboundMessage.RequestMessage) message, reference.length()); } else { final TransportResponseHandler handler; - if (isHandshake) { + long requestId = message.getRequestId(); + if (message.isHandshake()) { handler = handshaker.removeHandlerForHandshake(requestId); } else { TransportResponseHandler theHandler = responseHandlers.onResponseReceived(requestId, messageListener); - if (theHandler == null && TransportStatus.isError(status)) { + if (theHandler == null && message.isError()) { handler = handshaker.removeHandlerForHandshake(requestId); } else { handler = theHandler; @@ -1119,40 +935,20 @@ public final void messageReceived(BytesReference reference, TcpChannel channel) } // ignore if its null, the service logs it if (handler != null) { - if (TransportStatus.isError(status)) { - handlerResponseError(streamIn, handler); + if (message.isError()) { + handlerResponseError(message.getStreamInput(), handler); } else { - handleResponse(remoteAddress, streamIn, handler); + handleResponse(remoteAddress, message.getStreamInput(), handler); } // Check the entire message has been read - final int nextByte = streamIn.read(); + final int nextByte = message.getStreamInput().read(); // calling read() is useful to make sure the message is fully read, even if there is an EOS marker if (nextByte != -1) { throw new IllegalStateException("Message not fully read (response) for requestId [" + requestId + "], handler [" - + handler + "], error [" + TransportStatus.isError(status) + "]; resetting"); + + handler + "], error [" + message.isError() + "]; resetting"); } } } - success = true; - } finally { - if (success) { - IOUtils.close(streamIn); - } else { - IOUtils.closeWhileHandlingException(streamIn); - } - } - } - - static void ensureVersionCompatibility(Version version, Version currentVersion, boolean isHandshake) { - // for handshakes we are compatible with N-2 since otherwise we can't figure out our initial version - // since we are compatible with N-1 and N+1 so we always send our minCompatVersion as the initial version in the - // handshake. This looks odd but it's required to establish the connection correctly we check for real compatibility - // once the connection is established - final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion; - if (version.isCompatible(compatibilityVersion) == false) { - final Version minCompatibilityVersion = isHandshake ? compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion(); - String msg = "Received " + (isHandshake ? "handshake " : "") + "message from unsupported version: ["; - throw new IllegalStateException(msg + version + "] minimal compatible version is: [" + minCompatibilityVersion + "]"); } } @@ -1208,20 +1004,17 @@ private void handleException(final TransportResponseHandler handler, Throwable e }); } - protected String handleRequest(TcpChannel channel, String profileName, final StreamInput stream, long requestId, - int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) - throws IOException { - final Set features; - if (version.onOrAfter(Version.V_6_3_0)) { - features = Collections.unmodifiableSet(new TreeSet<>(Arrays.asList(stream.readStringArray()))); - } else { - features = Collections.emptySet(); - } - final String action = stream.readString(); + protected void handleRequest(TcpChannel channel, InboundMessage.RequestMessage message, int messageLengthBytes) throws IOException { + final Set features = message.getFeatures(); + final String profileName = channel.getProfile(); + final String action = message.getActionName(); + final long requestId = message.getRequestId(); + final StreamInput stream = message.getStreamInput(); + final Version version = message.getVersion(); messageListener.onRequestReceived(requestId, action); TransportChannel transportChannel = null; try { - if (TransportStatus.isHandshake(status)) { + if (message.isHandshake()) { handshaker.handleHandshake(version, features, channel, requestId, stream); } else { final RequestHandlerRegistry reg = getRequestHandler(action); @@ -1234,9 +1027,9 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str getInFlightRequestBreaker().addWithoutBreaking(messageLengthBytes); } transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, features, profileName, - messageLengthBytes, TransportStatus.isCompress(status)); + messageLengthBytes, message.isCompress()); final TransportRequest request = reg.newRequest(stream); - request.remoteAddress(new TransportAddress(remoteAddress)); + request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); // in case we throw an exception, i.e. when the limit is hit, we don't want to verify validateRequest(stream, requestId, action); threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); @@ -1245,7 +1038,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str // the circuit breaker tripped if (transportChannel == null) { transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, features, - profileName, 0, TransportStatus.isCompress(status)); + profileName, 0, message.isCompress()); } try { transportChannel.sendResponse(e); @@ -1254,7 +1047,6 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner); } } - return action; } // This template method is needed to inject custom error checking logic in tests. @@ -1331,70 +1123,11 @@ protected final void ensureOpen() { } } - /** - * This listener increments the transmitted bytes metric on success. - */ - private class SendListener extends NotifyOnceListener { - - private final TcpChannel channel; - private final long messageSize; - private final ActionListener delegateListener; - - private SendListener(TcpChannel channel, long messageSize, ActionListener delegateListener) { - this.channel = channel; - this.messageSize = messageSize; - this.delegateListener = delegateListener; - } - - @Override - protected void innerOnResponse(Void v) { - transmittedBytesMetric.inc(messageSize); - delegateListener.onResponse(v); - } - - @Override - protected void innerOnFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("send message failed [channel: {}]", channel), e); - delegateListener.onFailure(e); - } - } - - private class ReleaseListener implements ActionListener { - - private final Closeable optionalCloseable; - private final Runnable transportAdaptorCallback; - - private ReleaseListener(Closeable optionalCloseable, Runnable transportAdaptorCallback) { - this.optionalCloseable = optionalCloseable; - this.transportAdaptorCallback = transportAdaptorCallback; - } - - @Override - public void onResponse(Void aVoid) { - closeAndCallback(null); - } - - @Override - public void onFailure(Exception e) { - closeAndCallback(e); - } - - private void closeAndCallback(final Exception e) { - try { - IOUtils.close(optionalCloseable, transportAdaptorCallback::run); - } catch (final IOException inner) { - if (e != null) { - inner.addSuppressed(e); - } - throw new UncheckedIOException(inner); - } - } - } - @Override public final TransportStats getStats() { - return new TransportStats(acceptedChannels.size(), readBytesMetric.count(), readBytesMetric.sum(), transmittedBytesMetric.count(), - transmittedBytesMetric.sum()); + MeanMetric transmittedBytes = outboundHandler.getTransmittedBytes(); + return new TransportStats(acceptedChannels.size(), readBytesMetric.count(), readBytesMetric.sum(), transmittedBytes.count(), + transmittedBytes.sum()); } /** @@ -1569,7 +1302,7 @@ public void onFailure(Exception ex) { public void onTimeout() { if (countDown.fastForward()) { CloseableChannel.closeChannels(channels, false); - listener.onFailure(new ConnectTransportException(node, "connect_timeout[" + connectionProfile.getConnectTimeout() + "]")); + listener.onFailure(new ConnectTransportException(node, "connect_timeout[" + connectionProfile.getConnectTimeout() + "]")); } } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java index ea01cc4ddbfa6..0198609f4c3b6 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java @@ -51,6 +51,10 @@ void logInboundMessage(TcpChannel channel, BytesReference message) { void logOutboundMessage(TcpChannel channel, BytesReference message) { if (logger.isTraceEnabled()) { try { + if (message.get(0) != 'E') { + // This is not an Elasticsearch transport message. + return; + } BytesReference withoutHeader = message.slice(HEADER_SIZE, message.length() - HEADER_SIZE); String logMessage = format(channel, withoutHeader, "WRITE"); logger.trace(logMessage); diff --git a/server/src/main/java/org/elasticsearch/transport/Transports.java b/server/src/main/java/org/elasticsearch/transport/Transports.java index c531d33224a60..d6968a855365a 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transports.java +++ b/server/src/main/java/org/elasticsearch/transport/Transports.java @@ -38,7 +38,6 @@ public static boolean isTransportThread(Thread t) { final String threadName = t.getName(); for (String s : Arrays.asList( HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, - HttpServerTransport.HTTP_SERVER_ACCEPTOR_THREAD_NAME_PREFIX, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX, TEST_MOCK_TRANSPORT_THREAD_PREFIX)) { if (threadName.contains(s)) { diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 489a98dcbaac6..b4ce907d6f7b3 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.client.AbstractClientHeadersTestCase; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException; import org.elasticsearch.cluster.coordination.CoordinationStateRejectedException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IllegalShardRoutingStateException; @@ -809,8 +808,7 @@ public void testIds() { ids.put(148, UnknownNamedObjectException.class); ids.put(149, MultiBucketConsumerService.TooManyBucketsException.class); ids.put(150, CoordinationStateRejectedException.class); - ids.put(151, ClusterAlreadyBootstrappedException.class); - ids.put(152, SnapshotInProgressException.class); + ids.put(151, SnapshotInProgressException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequestTests.java deleted file mode 100644 index ee9c58413b350..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequestTests.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Collections; - -import static org.hamcrest.Matchers.equalTo; - -public class BootstrapClusterRequestTests extends ESTestCase { - - public void testSerialization() throws IOException { - final BootstrapConfiguration bootstrapConfiguration - = new BootstrapConfiguration(Collections.singletonList(new NodeDescription(null, randomAlphaOfLength(10)))); - final BootstrapClusterRequest original = new BootstrapClusterRequest(bootstrapConfiguration); - assertNull(original.validate()); - final BootstrapClusterRequest deserialized = copyWriteable(original, writableRegistry(), BootstrapClusterRequest::new); - assertThat(deserialized.getBootstrapConfiguration(), equalTo(bootstrapConfiguration)); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfigurationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfigurationTests.java deleted file mode 100644 index fc2e24017e76a..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfigurationTests.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNode.Role; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.EqualsHashCodeTestUtils; -import org.elasticsearch.test.EqualsHashCodeTestUtils.CopyFunction; - -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.singleton; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.startsWith; - -public class BootstrapConfigurationTests extends ESTestCase { - - public void testEqualsHashcodeSerialization() { - // Note: the explicit cast of the CopyFunction is needed for some IDE (specifically Eclipse 4.8.0) to infer the right type - EqualsHashCodeTestUtils.checkEqualsAndHashCode(randomBootstrapConfiguration(), - (CopyFunction) bootstrapConfiguration -> copyWriteable(bootstrapConfiguration, writableRegistry(), - BootstrapConfiguration::new), - this::mutate); - } - - public void testNodeDescriptionResolvedByName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - assertThat(new NodeDescription(null, expectedNode.getName()).resolve(discoveryNodes), equalTo(expectedNode)); - } - - public void testNodeDescriptionResolvedByIdAndName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - assertThat(new NodeDescription(expectedNode).resolve(discoveryNodes), equalTo(expectedNode)); - } - - public void testRejectsMismatchedId() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(randomAlphaOfLength(11), expectedNode.getName()).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("node id mismatch comparing ")); - } - - public void testRejectsMismatchedName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(expectedNode.getId(), randomAlphaOfLength(11)).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("node name mismatch comparing ")); - } - - public void testFailsIfNoMatch() { - final List discoveryNodes = randomDiscoveryNodes(); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> randomNodeDescription().resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("no node matching ")); - } - - public void testFailsIfDuplicateMatchOnName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode discoveryNode = randomFrom(discoveryNodes); - discoveryNodes.add(new DiscoveryNode(discoveryNode.getName(), randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), - singleton(Role.MASTER), Version.CURRENT)); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(null, discoveryNode.getName()).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("discovered multiple nodes matching ")); - } - - public void testFailsIfDuplicatedNode() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode discoveryNode = randomFrom(discoveryNodes); - discoveryNodes.add(discoveryNode); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(discoveryNode).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("discovered multiple nodes matching ")); - } - - public void testResolvesEntireConfiguration() { - final List discoveryNodes = randomDiscoveryNodes(); - final List selectedNodes = randomSubsetOf(randomIntBetween(1, discoveryNodes.size()), discoveryNodes); - final BootstrapConfiguration bootstrapConfiguration = new BootstrapConfiguration(selectedNodes.stream() - .map(discoveryNode -> randomBoolean() ? new NodeDescription(discoveryNode) : new NodeDescription(null, discoveryNode.getName())) - .collect(Collectors.toList())); - - final VotingConfiguration expectedConfiguration - = new VotingConfiguration(selectedNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet())); - final VotingConfiguration votingConfiguration = bootstrapConfiguration.resolve(discoveryNodes); - assertThat(votingConfiguration, equalTo(expectedConfiguration)); - } - - public void testRejectsDuplicatedDescriptions() { - final List discoveryNodes = randomDiscoveryNodes(); - final List selectedNodes = randomSubsetOf(randomIntBetween(1, discoveryNodes.size()), discoveryNodes); - final List selectedNodeDescriptions = selectedNodes.stream() - .map(discoveryNode -> randomBoolean() ? new NodeDescription(discoveryNode) : new NodeDescription(null, discoveryNode.getName())) - .collect(Collectors.toList()); - final NodeDescription toDuplicate = randomFrom(selectedNodeDescriptions); - selectedNodeDescriptions.add(randomBoolean() ? toDuplicate : new NodeDescription(null, toDuplicate.getName())); - final BootstrapConfiguration bootstrapConfiguration = new BootstrapConfiguration(selectedNodeDescriptions); - - final ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> bootstrapConfiguration.resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("multiple nodes matching ")); - } - - private NodeDescription mutate(NodeDescription original) { - if (randomBoolean()) { - return new NodeDescription(original.getId(), randomAlphaOfLength(21 - original.getName().length())); - } else { - if (original.getId() == null) { - return new NodeDescription(randomAlphaOfLength(10), original.getName()); - } else if (randomBoolean()) { - return new NodeDescription(randomAlphaOfLength(21 - original.getId().length()), original.getName()); - } else { - return new NodeDescription(null, original.getName()); - } - } - } - - protected BootstrapConfiguration mutate(BootstrapConfiguration original) { - final List newDescriptions = new ArrayList<>(original.getNodeDescriptions()); - final int mutateElement = randomIntBetween(0, newDescriptions.size()); - if (mutateElement == newDescriptions.size()) { - newDescriptions.add(randomIntBetween(0, newDescriptions.size()), randomNodeDescription()); - } else { - if (newDescriptions.size() > 1 && randomBoolean()) { - newDescriptions.remove(mutateElement); - } else { - newDescriptions.set(mutateElement, mutate(newDescriptions.get(mutateElement))); - } - } - return new BootstrapConfiguration(newDescriptions); - } - - protected NodeDescription randomNodeDescription() { - return new NodeDescription(randomBoolean() ? null : randomAlphaOfLength(10), randomAlphaOfLength(10)); - } - - protected BootstrapConfiguration randomBootstrapConfiguration() { - final int size = randomIntBetween(1, 5); - final List nodeDescriptions = new ArrayList<>(size); - while (nodeDescriptions.size() <= size) { - nodeDescriptions.add(randomNodeDescription()); - } - return new BootstrapConfiguration(nodeDescriptions); - } - - protected List randomDiscoveryNodes() { - final int size = randomIntBetween(1, 5); - final List nodes = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - nodes.add(new DiscoveryNode(randomAlphaOfLength(10), randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), - singleton(Role.MASTER), Version.CURRENT)); - } - return nodes; - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequestTests.java deleted file mode 100644 index 676e2e958cac7..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequestTests.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -import static org.hamcrest.Matchers.endsWith; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.startsWith; -import static org.hamcrest.core.Is.is; - -public class GetDiscoveredNodesRequestTests extends ESTestCase { - - public void testTimeoutValidation() { - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - assertThat("default value is 30s", getDiscoveredNodesRequest.getTimeout(), is(TimeValue.timeValueSeconds(30))); - - final TimeValue newTimeout = TimeValue.parseTimeValue(randomTimeValue(), "timeout"); - getDiscoveredNodesRequest.setTimeout(newTimeout); - assertThat("value updated", getDiscoveredNodesRequest.getTimeout(), equalTo(newTimeout)); - - final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> getDiscoveredNodesRequest.setTimeout(TimeValue.timeValueNanos(randomLongBetween(-10, -1)))); - assertThat(exception.getMessage(), startsWith("negative timeout of ")); - assertThat(exception.getMessage(), endsWith(" is not allowed")); - - getDiscoveredNodesRequest.setTimeout(null); - assertThat("value updated", getDiscoveredNodesRequest.getTimeout(), nullValue()); - } - - public void testSerialization() throws IOException { - final GetDiscoveredNodesRequest originalRequest = new GetDiscoveredNodesRequest(); - - if (randomBoolean()) { - originalRequest.setTimeout(TimeValue.parseTimeValue(randomTimeValue(), "timeout")); - } else if (randomBoolean()) { - originalRequest.setTimeout(null); - } - - final GetDiscoveredNodesRequest deserialized = copyWriteable(originalRequest, writableRegistry(), GetDiscoveredNodesRequest::new); - - assertThat(deserialized.getTimeout(), equalTo(originalRequest.getTimeout())); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponseTests.java deleted file mode 100644 index 7d2fc602e66c6..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponseTests.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNode.Role; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Set; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.singleton; -import static org.hamcrest.Matchers.equalTo; - -public class GetDiscoveredNodesResponseTests extends ESTestCase { - public void testSerialization() throws IOException { - final GetDiscoveredNodesResponse original = new GetDiscoveredNodesResponse(randomDiscoveryNodeSet()); - final GetDiscoveredNodesResponse deserialized = copyWriteable(original, writableRegistry(), GetDiscoveredNodesResponse::new); - assertThat(deserialized.getNodes(), equalTo(original.getNodes())); - } - - private Set randomDiscoveryNodeSet() { - final int size = randomIntBetween(1, 10); - final Set nodes = new HashSet<>(size); - while (nodes.size() < size) { - assertTrue(nodes.add(new DiscoveryNode(randomAlphaOfLength(10), randomAlphaOfLength(10), - UUIDs.randomBase64UUID(random()), randomAlphaOfLength(10), randomAlphaOfLength(10), buildNewFakeTransportAddress(), - emptyMap(), singleton(Role.MASTER), Version.CURRENT))); - } - return nodes; - } - - public void testConversionToBootstrapConfiguration() { - final Set nodes = randomDiscoveryNodeSet(); - assertThat(new GetDiscoveredNodesResponse(nodes).getBootstrapConfiguration().resolve(nodes).getNodeIds(), - equalTo(nodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet()))); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java deleted file mode 100644 index 31486a52bd08f..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.coordination.InMemoryPersistedState; -import org.elasticsearch.cluster.coordination.NoOpClusterApplier; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.transport.MockTransport; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.util.Collections; -import java.util.Random; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static java.util.Collections.singletonList; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verifyZeroInteractions; - -public class TransportBootstrapClusterActionTests extends ESTestCase { - - private static final ActionFilters EMPTY_FILTERS = new ActionFilters(emptySet()); - - private DiscoveryNode discoveryNode; - private static ThreadPool threadPool; - private TransportService transportService; - private Coordinator coordinator; - - private static BootstrapClusterRequest exampleRequest() { - return new BootstrapClusterRequest(new BootstrapConfiguration(singletonList(new NodeDescription("id", "name")))); - } - - @BeforeClass - public static void createThreadPool() { - threadPool = new TestThreadPool("test", Settings.EMPTY); - } - - @AfterClass - public static void shutdownThreadPool() { - threadPool.shutdown(); - } - - @Before - public void setupTest() { - discoveryNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); - final MockTransport transport = new MockTransport(); - transportService = transport.createTransportService(Settings.EMPTY, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> discoveryNode, null, emptySet()); - - final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - coordinator = new Coordinator("local", Settings.EMPTY, clusterSettings, transportService, writableRegistry(), - ESAllocationTestCase.createAllocationService(Settings.EMPTY), - new MasterService("local", Settings.EMPTY, threadPool), - () -> new InMemoryPersistedState(0, ClusterState.builder(new ClusterName("cluster")).build()), r -> emptyList(), - new NoOpClusterApplier(), Collections.emptyList(), new Random(random().nextLong())); - } - - public void testHandlesNonstandardDiscoveryImplementation() throws InterruptedException { - final Discovery discovery = mock(Discovery.class); - verifyZeroInteractions(discovery); - - final String nonstandardDiscoveryType = randomFrom(DiscoveryModule.ZEN_DISCOVERY_TYPE, "single-node", "unknown"); - new TransportBootstrapClusterAction( - Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), nonstandardDiscoveryType).build(), - EMPTY_FILTERS, transportService, discovery); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, exampleRequest(), new ResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - assertThat(rootCause, instanceOf(IllegalArgumentException.class)); - assertThat(rootCause.getMessage(), equalTo("cluster bootstrapping is not supported by discovery type [" + - nonstandardDiscoveryType + "]")); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testFailsOnNonMasterEligibleNodes() throws InterruptedException { - discoveryNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - // transport service only picks up local node when started, so we can change it here ^ - - new TransportBootstrapClusterAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, exampleRequest(), new ResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - assertThat(rootCause, instanceOf(IllegalArgumentException.class)); - assertThat(rootCause.getMessage(), - equalTo("this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node")); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testSetsInitialConfiguration() throws InterruptedException { - new TransportBootstrapClusterAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - - assertFalse(coordinator.isInitialConfigurationSet()); - - final BootstrapClusterRequest request - = new BootstrapClusterRequest(new BootstrapConfiguration(singletonList(new NodeDescription(discoveryNode)))); - - { - final int parallelRequests = 10; - final CountDownLatch countDownLatch = new CountDownLatch(parallelRequests); - final AtomicInteger successes = new AtomicInteger(); - - for (int i = 0; i < parallelRequests; i++) { - transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, request, new ResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - if (response.getAlreadyBootstrapped() == false) { - successes.incrementAndGet(); - } - countDownLatch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - throw new AssertionError("should not be called", exp); - } - }); - } - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - assertThat(successes.get(), equalTo(1)); - } - - assertTrue(coordinator.isInitialConfigurationSet()); - - { - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, request, new ResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - assertTrue(response.getAlreadyBootstrapped()); - countDownLatch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - throw new AssertionError("should not be called", exp); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - } - - private abstract class ResponseHandler implements TransportResponseHandler { - @Override - public String executor() { - return Names.SAME; - } - - @Override - public BootstrapClusterResponse read(StreamInput in) throws IOException { - return new BootstrapClusterResponse(in); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesActionTests.java deleted file mode 100644 index 6d94dcf6eca14..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesActionTests.java +++ /dev/null @@ -1,533 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.Version; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; -import org.elasticsearch.cluster.coordination.CoordinationMetaData; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.coordination.InMemoryPersistedState; -import org.elasticsearch.cluster.coordination.NoOpClusterApplier; -import org.elasticsearch.cluster.coordination.PeersResponse; -import org.elasticsearch.cluster.coordination.PublicationTransportHandler; -import org.elasticsearch.cluster.coordination.PublishWithJoinResponse; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.discovery.PeersRequest; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.transport.MockTransport; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.BytesTransportRequest; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.TransportService.HandshakeResponse; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.EnumSet; -import java.util.Random; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static java.util.Collections.singleton; -import static java.util.Collections.singletonList; -import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING; -import static org.elasticsearch.discovery.PeerFinder.REQUEST_PEERS_ACTION_NAME; -import static org.elasticsearch.transport.TransportService.HANDSHAKE_ACTION_NAME; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.startsWith; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verifyZeroInteractions; - -public class TransportGetDiscoveredNodesActionTests extends ESTestCase { - - private static final ActionFilters EMPTY_FILTERS = new ActionFilters(emptySet()); - - private static ThreadPool threadPool; - private DiscoveryNode localNode; - private String clusterName; - private TransportService transportService; - private Coordinator coordinator; - private DiscoveryNode otherNode; - - @BeforeClass - public static void createThreadPool() { - threadPool = new TestThreadPool("test", Settings.EMPTY); - } - - @AfterClass - public static void shutdownThreadPool() { - threadPool.shutdown(); - } - - @Before - public void setupTest() { - clusterName = randomAlphaOfLength(10); - localNode = new DiscoveryNode( - "node1", "local", buildNewFakeTransportAddress(), emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); - otherNode = new DiscoveryNode( - "node2", "other", buildNewFakeTransportAddress(), emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); - - final MockTransport transport = new MockTransport() { - @Override - protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) { - if (action.equals(HANDSHAKE_ACTION_NAME) && node.getAddress().equals(otherNode.getAddress())) { - handleResponse(requestId, new HandshakeResponse(otherNode, new ClusterName(clusterName), Version.CURRENT)); - } - } - }; - transportService = transport.createTransportService( - Settings.builder().put(CLUSTER_NAME_SETTING.getKey(), clusterName).build(), threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, emptySet()); - - final Settings settings = Settings.builder() - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), - ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)).build(); // suppress auto-bootstrap - - final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - coordinator = new Coordinator("local", settings, clusterSettings, transportService, writableRegistry(), - ESAllocationTestCase.createAllocationService(settings), - new MasterService("local", settings, threadPool), - () -> new InMemoryPersistedState(0, ClusterState.builder(new ClusterName(clusterName)).build()), r -> emptyList(), - new NoOpClusterApplier(), Collections.emptyList(), new Random(random().nextLong())); - } - - public void testHandlesNonstandardDiscoveryImplementation() throws InterruptedException { - final Discovery discovery = mock(Discovery.class); - verifyZeroInteractions(discovery); - - final String nonstandardDiscoveryType = randomFrom(DiscoveryModule.ZEN_DISCOVERY_TYPE, "single-node", "unknown"); - new TransportGetDiscoveredNodesAction( - Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), nonstandardDiscoveryType).build(), - EMPTY_FILTERS, transportService, discovery); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, new GetDiscoveredNodesRequest(), new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - assertThat(rootCause, instanceOf(IllegalArgumentException.class)); - assertThat(rootCause.getMessage(), equalTo("discovered nodes are not exposed by discovery type [" + - nonstandardDiscoveryType + "]")); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testFailsOnMasterIneligibleNodes() throws InterruptedException { - localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - // transport service only picks up local node when started, so we can change it here ^ - - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, new GetDiscoveredNodesRequest(), new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - assertThat(rootCause, instanceOf(IllegalArgumentException.class)); - assertThat(rootCause.getMessage(), - equalTo("this node is not master-eligible, but discovered nodes are only exposed by master-eligible nodes")); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testFailsQuicklyWithZeroTimeoutAndAcceptsNullTimeout() throws InterruptedException { - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - - { - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setTimeout(null); - getDiscoveredNodesRequest.setRequiredNodes(singletonList("not-a-node")); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - throw new AssertionError("should not be called", exp); - } - }); - } - - { - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - getDiscoveredNodesRequest.setRequiredNodes(singletonList("not-a-node")); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - assertThat(rootCause, instanceOf(ElasticsearchTimeoutException.class)); - assertThat(rootCause.getMessage(), startsWith("timed out while waiting for GetDiscoveredNodesRequest{")); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - } - - public void testFailsIfAlreadyBootstrapped() throws InterruptedException { - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - coordinator.setInitialConfiguration(new VotingConfiguration(singleton(localNode.getId()))); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setTimeout(null); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - if (exp.getRootCause() instanceof ClusterAlreadyBootstrappedException) { - countDownLatch.countDown(); - } else { - throw new AssertionError("should not be called", exp); - } - } - }); - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testFailsIfAcceptsClusterStateWithNonemptyConfiguration() throws InterruptedException, IOException { - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setTimeout(null); - getDiscoveredNodesRequest.setRequiredNodes(singletonList("not-a-node")); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - if (exp.getRootCause() instanceof ClusterAlreadyBootstrappedException) { - countDownLatch.countDown(); - } else { - throw new AssertionError("should not be called", exp); - } - } - }); - - ClusterState.Builder publishedClusterState = ClusterState.builder(ClusterName.DEFAULT); - publishedClusterState.incrementVersion(); - publishedClusterState.nodes(DiscoveryNodes.builder() - .add(localNode).add(otherNode).localNodeId(localNode.getId()).masterNodeId(otherNode.getId())); - publishedClusterState.metaData(MetaData.builder().coordinationMetaData(CoordinationMetaData.builder() - .term(1) - .lastAcceptedConfiguration(new VotingConfiguration(singleton(otherNode.getId()))) - .lastCommittedConfiguration(new VotingConfiguration(singleton(otherNode.getId()))) - .build())); - - transportService.sendRequest(localNode, PublicationTransportHandler.PUBLISH_STATE_ACTION_NAME, - new BytesTransportRequest(PublicationTransportHandler.serializeFullClusterState(publishedClusterState.build(), Version.CURRENT), - Version.CURRENT), - new TransportResponseHandler() { - @Override - public void handleResponse(PublishWithJoinResponse response) { - // do nothing - } - - @Override - public void handleException(TransportException exp) { - throw new AssertionError("should not be called", exp); - } - - @Override - public String executor() { - return Names.SAME; - } - - @Override - public PublishWithJoinResponse read(StreamInput in) throws IOException { - return new PublishWithJoinResponse(in); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testGetsDiscoveredNodesWithZeroTimeout() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionMet(getDiscoveredNodesRequest); - } - - public void testGetsDiscoveredNodesByAddress() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getAddress().toString(), otherNode.getAddress().toString())); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionMet(getDiscoveredNodesRequest); - } - - public void testGetsDiscoveredNodesByName() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getName(), otherNode.getName())); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionMet(getDiscoveredNodesRequest); - } - - public void testGetsDiscoveredNodesByIP() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - String ip = localNode.getAddress().getAddress(); - getDiscoveredNodesRequest.setRequiredNodes(Collections.singletonList(ip)); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionFailedOnDuplicate(getDiscoveredNodesRequest, '[' + ip + "] matches ["); - } - - public void testGetsDiscoveredNodesDuplicateName() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - String name = localNode.getName(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(name, name)); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionFailedOnDuplicate(getDiscoveredNodesRequest, "[" + localNode + "] matches [" + name + ", " + name + ']'); - } - - public void testGetsDiscoveredNodesWithDuplicateMatchNameAndAddress() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getAddress().toString(), localNode.getName())); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionFailedOnDuplicate(getDiscoveredNodesRequest, "[" + localNode + "] matches ["); - } - - public void testGetsDiscoveredNodesTimeoutOnMissing() throws InterruptedException { - setupGetDiscoveredNodesAction(); - - final CountDownLatch latch = new CountDownLatch(1); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getAddress().toString(), "_missing")); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - assertThat(exp.getRootCause(), instanceOf(ElasticsearchTimeoutException.class)); - latch.countDown(); - } - }); - - latch.await(10L, TimeUnit.SECONDS); - } - - public void testThrowsExceptionIfDuplicateDiscoveredLater() throws InterruptedException { - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - final String ip = localNode.getAddress().getAddress(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(ip, "not-a-node")); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - Throwable t = exp.getRootCause(); - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), startsWith('[' + ip + "] matches [")); - countDownLatch.countDown(); - } - }); - - executeRequestPeersAction(); - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - private void executeRequestPeersAction() { - threadPool.generic().execute(() -> - transportService.sendRequest(localNode, REQUEST_PEERS_ACTION_NAME, new PeersRequest(otherNode, emptyList()), - new TransportResponseHandler() { - @Override - public PeersResponse read(StreamInput in) throws IOException { - return new PeersResponse(in); - } - - @Override - public void handleResponse(PeersResponse response) { - } - - @Override - public void handleException(TransportException exp) { - } - - @Override - public String executor() { - return Names.SAME; - } - })); - } - - private void setupGetDiscoveredNodesAction() throws InterruptedException { - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - - executeRequestPeersAction(); - - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getName(), otherNode.getName())); - assertWaitConditionMet(getDiscoveredNodesRequest); - } - - private void assertWaitConditionMet(GetDiscoveredNodesRequest getDiscoveredNodesRequest) throws InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - assertThat(response.getNodes(), containsInAnyOrder(localNode, otherNode)); - countDownLatch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - throw new AssertionError("should not be called", exp); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - private void assertWaitConditionFailedOnDuplicate(GetDiscoveredNodesRequest getDiscoveredNodesRequest, String message) - throws InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - Throwable t = exp.getRootCause(); - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), startsWith(message)); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - private abstract class ResponseHandler implements TransportResponseHandler { - @Override - public String executor() { - return Names.SAME; - } - - @Override - public GetDiscoveredNodesResponse read(StreamInput in) throws IOException { - return new GetDiscoveredNodesResponse(in); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java index 6fddf2dd5f85b..593cdc60e8a23 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -135,20 +134,8 @@ private static PutMappingRequest createTestItem() throws IOException { String type = randomAlphaOfLength(5); request.type(type); - request.source(randomMapping()); + request.source(RandomCreateIndexGenerator.randomMapping()); return request; } - - private static XContentBuilder randomMapping() throws IOException { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - - if (randomBoolean()) { - RandomCreateIndexGenerator.randomMappingFields(builder, true); - } - - builder.endObject(); - return builder; - } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 6b8d1ab4fafb7..9f6f19596d080 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -25,7 +25,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -276,7 +276,7 @@ public void testRolloverOnExistingIndex() throws Exception { public void testRolloverWithDateMath() { ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); assumeTrue("only works on the same day", now.plusMinutes(5).getDayOfYear() == now.getDayOfYear()); - String index = "test-" + DateFormatters.forPattern("YYYY.MM.dd").format(now) + "-1"; + String index = "test-" + DateFormatter.forPattern("YYYY.MM.dd").format(now) + "-1"; String dateMathExp = ""; assertAcked(prepareCreate(dateMathExp).addAlias(new Alias("test_alias")).get()); ensureGreen(index); @@ -290,14 +290,14 @@ public void testRolloverWithDateMath() { ensureGreen(index); RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias").get(); assertThat(response.getOldIndex(), equalTo(index)); - assertThat(response.getNewIndex(), equalTo("test-" + DateFormatters.forPattern("YYYY.MM").format(now) + "-000002")); + assertThat(response.getNewIndex(), equalTo("test-" + DateFormatter.forPattern("YYYY.MM").format(now) + "-000002")); assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); response = client().admin().indices().prepareRolloverIndex("test_alias").get(); - assertThat(response.getOldIndex(), equalTo("test-" + DateFormatters.forPattern("YYYY.MM").format(now) + "-000002")); - assertThat(response.getNewIndex(), equalTo("test-" + DateFormatters.forPattern("YYYY.MM").format(now) + "-000003")); + assertThat(response.getOldIndex(), equalTo("test-" + DateFormatter.forPattern("YYYY.MM").format(now) + "-000002")); + assertThat(response.getNewIndex(), equalTo("test-" + DateFormatter.forPattern("YYYY.MM").format(now) + "-000003")); assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); @@ -310,8 +310,8 @@ public void testRolloverWithDateMath() { IndexMetaData.SETTING_INDEX_PROVIDED_NAME)); response = client().admin().indices().prepareRolloverIndex("test_alias").setNewIndexName("").get(); - assertThat(response.getOldIndex(), equalTo("test-" + DateFormatters.forPattern("YYYY.MM").format(now) + "-000003")); - assertThat(response.getNewIndex(), equalTo("test-" + DateFormatters.forPattern("YYYY.MM.dd").format(now) + "-000004")); + assertThat(response.getOldIndex(), equalTo("test-" + DateFormatter.forPattern("YYYY.MM").format(now) + "-000003")); + assertThat(response.getNewIndex(), equalTo("test-" + DateFormatter.forPattern("YYYY.MM.dd").format(now) + "-000004")); assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index 9378a2cdd86bb..328950e4f3569 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -241,6 +241,7 @@ public void run() throws IOException { public void testExpandRequestOptions() throws IOException { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); boolean version = randomBoolean(); + final boolean seqNoAndTerm = randomBoolean(); mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) { @Override @@ -249,13 +250,14 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL assertTrue(request.requests().stream().allMatch((r) -> "foo".equals(r.preference()))); assertTrue(request.requests().stream().allMatch((r) -> "baz".equals(r.routing()))); assertTrue(request.requests().stream().allMatch((r) -> version == r.source().version())); + assertTrue(request.requests().stream().allMatch((r) -> seqNoAndTerm == r.source().seqNoAndPrimaryTerm())); assertTrue(request.requests().stream().allMatch((r) -> postFilter.equals(r.source().postFilter()))); } }; mockSearchPhaseContext.getRequest().source(new SearchSourceBuilder() .collapse( new CollapseBuilder("someField") - .setInnerHits(new InnerHitBuilder().setName("foobarbaz").setVersion(version)) + .setInnerHits(new InnerHitBuilder().setName("foobarbaz").setVersion(version).setSeqNoAndPrimaryTerm(seqNoAndTerm)) ) .postFilter(QueryBuilders.existsQuery("foo"))) .preference("foobar") diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index a5ab81d83fbcd..e262147ef85d0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -102,9 +102,8 @@ public void testSort() { size = first.get().queryResult().size(); } int accumulatedLength = Math.min(queryResultSize, getTotalQueryHits(results)); - ScoreDoc[] sortedDocs = SearchPhaseController.sortDocs(true, results.asList(), null, new SearchPhaseController.TopDocsStats(), - from, size) - .scoreDocs; + ScoreDoc[] sortedDocs = SearchPhaseController.sortDocs(true, results.asList(), null, + new SearchPhaseController.TopDocsStats(SearchContext.TRACK_TOTAL_HITS_ACCURATE), from, size).scoreDocs; for (Suggest.Suggestion suggestion : reducedSuggest(results)) { int suggestionSize = suggestion.getEntries().get(0).getOptions().size(); accumulatedLength += suggestionSize; @@ -126,12 +125,12 @@ public void testSortIsIdempotent() throws Exception { from = first.get().queryResult().from(); size = first.get().queryResult().size(); } - SearchPhaseController.TopDocsStats topDocsStats = new SearchPhaseController.TopDocsStats(); + SearchPhaseController.TopDocsStats topDocsStats = new SearchPhaseController.TopDocsStats(SearchContext.TRACK_TOTAL_HITS_ACCURATE); ScoreDoc[] sortedDocs = SearchPhaseController.sortDocs(ignoreFrom, results.asList(), null, topDocsStats, from, size).scoreDocs; results = generateSeededQueryResults(randomSeed, nShards, Collections.emptyList(), queryResultSize, useConstantScore); - SearchPhaseController.TopDocsStats topDocsStats2 = new SearchPhaseController.TopDocsStats(); + SearchPhaseController.TopDocsStats topDocsStats2 = new SearchPhaseController.TopDocsStats(SearchContext.TRACK_TOTAL_HITS_ACCURATE); ScoreDoc[] sortedDocs2 = SearchPhaseController.sortDocs(ignoreFrom, results.asList(), null, topDocsStats2, from, size).scoreDocs; assertEquals(sortedDocs.length, sortedDocs2.length); for (int i = 0; i < sortedDocs.length; i++) { @@ -139,7 +138,7 @@ public void testSortIsIdempotent() throws Exception { assertEquals(sortedDocs[i].shardIndex, sortedDocs2[i].shardIndex); assertEquals(sortedDocs[i].score, sortedDocs2[i].score, 0.0f); } - assertEquals(topDocsStats.maxScore, topDocsStats2.maxScore, 0.0f); + assertEquals(topDocsStats.getMaxScore(), topDocsStats2.getMaxScore(), 0.0f); assertEquals(topDocsStats.getTotalHits().value, topDocsStats2.getTotalHits().value); assertEquals(topDocsStats.getTotalHits().relation, topDocsStats2.getTotalHits().relation); assertEquals(topDocsStats.fetchHits, topDocsStats2.fetchHits); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java new file mode 100644 index 0000000000000..d02b712eaaef3 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -0,0 +1,561 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.range.InternalDateRange; +import org.elasticsearch.search.aggregations.bucket.range.Range; +import org.elasticsearch.search.aggregations.metrics.InternalMax; +import org.elasticsearch.search.aggregations.metrics.Max; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.profile.ProfileShardResult; +import org.elasticsearch.search.profile.SearchProfileShardResults; +import org.elasticsearch.search.profile.SearchProfileShardResultsTests; +import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.search.suggest.completion.CompletionSuggestion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.RemoteClusterAware; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.PriorityQueue; +import java.util.TreeMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class SearchResponseMergerTests extends ESTestCase { + + private int numResponses; + private ExecutorService executorService; + + @Before + public void init() { + numResponses = randomIntBetween(2, 10); + executorService = Executors.newFixedThreadPool(numResponses); + } + + private void addResponse(SearchResponseMerger searchResponseMerger, SearchResponse searchResponse) { + if (randomBoolean()) { + executorService.submit(() -> searchResponseMerger.add(searchResponse)); + } else { + searchResponseMerger.add(searchResponse); + } + } + + private void awaitResponsesAdded() throws InterruptedException { + executorService.shutdown(); + executorService.awaitTermination(5, TimeUnit.SECONDS); + } + + public void testMergeTookInMillis() throws InterruptedException { + long currentRelativeTime = randomLong(); + SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime); + SearchResponseMerger merger = new SearchResponseMerger(randomIntBetween(0, 1000), randomIntBetween(0, 10000), + SearchContext.TRACK_TOTAL_HITS_ACCURATE, timeProvider, flag -> null); + for (int i = 0; i < numResponses; i++) { + SearchResponse searchResponse = new SearchResponse(InternalSearchResponse.empty(), null, 1, 1, 0, randomLong(), + ShardSearchFailure.EMPTY_ARRAY, SearchResponseTests.randomClusters()); + addResponse(merger, searchResponse); + } + awaitResponsesAdded(); + SearchResponse searchResponse = merger.getMergedResponse(SearchResponse.Clusters.EMPTY); + assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis()); + } + + public void testMergeShardFailures() throws InterruptedException { + SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0); + SearchResponseMerger merger = new SearchResponseMerger(0, 0, SearchContext.TRACK_TOTAL_HITS_ACCURATE, + searchTimeProvider, flag -> null); + PriorityQueue> priorityQueue = new PriorityQueue<>(Comparator.comparing(Tuple::v1)); + int numIndices = numResponses * randomIntBetween(1, 3); + Iterator> indicesPerCluster = randomRealisticIndices(numIndices, numResponses).entrySet().iterator(); + for (int i = 0; i < numResponses; i++) { + Map.Entry entry = indicesPerCluster.next(); + String clusterAlias = entry.getKey(); + Index[] indices = entry.getValue(); + int numFailures = randomIntBetween(1, 10); + ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures]; + for (int j = 0; j < numFailures; j++) { + ShardId shardId = new ShardId(randomFrom(indices), j); + ShardSearchFailure failure; + if (randomBoolean()) { + SearchShardTarget searchShardTarget = new SearchShardTarget(randomAlphaOfLength(6), shardId, clusterAlias, null); + failure = new ShardSearchFailure(new IllegalArgumentException(), searchShardTarget); + } else { + ElasticsearchException elasticsearchException = new ElasticsearchException(new IllegalArgumentException()); + elasticsearchException.setShard(shardId); + failure = new ShardSearchFailure(elasticsearchException); + } + shardSearchFailures[j] = failure; + priorityQueue.add(Tuple.tuple(shardId, failure)); + } + SearchResponse searchResponse = new SearchResponse(InternalSearchResponse.empty(), null, + 1, 1, 0, 100L, shardSearchFailures, SearchResponse.Clusters.EMPTY); + addResponse(merger, searchResponse); + } + awaitResponsesAdded(); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = merger.getMergedResponse(clusters); + assertSame(clusters, mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(priorityQueue.size(), mergedResponse.getFailedShards()); + ShardSearchFailure[] shardFailures = mergedResponse.getShardFailures(); + assertEquals(priorityQueue.size(), shardFailures.length); + for (ShardSearchFailure shardFailure : shardFailures) { + ShardSearchFailure expected = priorityQueue.poll().v2(); + assertSame(expected, shardFailure); + } + } + + public void testMergeShardFailuresNullShardId() throws InterruptedException { + SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0); + SearchResponseMerger merger = new SearchResponseMerger(0, 0, SearchContext.TRACK_TOTAL_HITS_ACCURATE, + searchTimeProvider, flag -> null); + List expectedFailures = new ArrayList<>(); + for (int i = 0; i < numResponses; i++) { + int numFailures = randomIntBetween(1, 50); + ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures]; + for (int j = 0; j < numFailures; j++) { + ShardSearchFailure shardSearchFailure = new ShardSearchFailure(new ElasticsearchException(new IllegalArgumentException())); + shardSearchFailures[j] = shardSearchFailure; + expectedFailures.add(shardSearchFailure); + } + SearchResponse searchResponse = new SearchResponse(InternalSearchResponse.empty(), null, + 1, 1, 0, 100L, shardSearchFailures, SearchResponse.Clusters.EMPTY); + addResponse(merger, searchResponse); + } + awaitResponsesAdded(); + ShardSearchFailure[] shardFailures = merger.getMergedResponse(SearchResponse.Clusters.EMPTY).getShardFailures(); + assertThat(Arrays.asList(shardFailures), containsInAnyOrder(expectedFailures.toArray(ShardSearchFailure.EMPTY_ARRAY))); + } + + public void testMergeProfileResults() throws InterruptedException { + SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0); + SearchResponseMerger merger = new SearchResponseMerger(0, 0, SearchContext.TRACK_TOTAL_HITS_ACCURATE, + searchTimeProvider, flag -> null); + Map expectedProfile = new HashMap<>(); + for (int i = 0; i < numResponses; i++) { + SearchProfileShardResults profile = SearchProfileShardResultsTests.createTestItem(); + expectedProfile.putAll(profile.getShardResults()); + SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, profile, false, null, 1); + SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, 1, 1, 0, 100L, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + addResponse(merger, searchResponse); + } + awaitResponsesAdded(); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = merger.getMergedResponse(clusters); + assertSame(clusters, mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(0, mergedResponse.getFailedShards()); + assertEquals(0, mergedResponse.getShardFailures().length); + assertEquals(expectedProfile, mergedResponse.getProfileResults()); + } + + public void testMergeSuggestions() throws InterruptedException { + String suggestionName = randomAlphaOfLengthBetween(4, 8); + boolean skipDuplicates = randomBoolean(); + int size = randomIntBetween(1, 100); + SearchResponseMerger searchResponseMerger = new SearchResponseMerger(0, 0, 0, new SearchTimeProvider(0, 0, () -> 0), flag -> null); + for (int i = 0; i < numResponses; i++) { + List>> suggestions = + new ArrayList<>(); + CompletionSuggestion completionSuggestion = new CompletionSuggestion(suggestionName, size, skipDuplicates); + CompletionSuggestion.Entry options = new CompletionSuggestion.Entry(new Text("suggest"), 0, 10); + options.addOption(new CompletionSuggestion.Entry.Option(randomInt(), new Text("suggestion"), i, Collections.emptyMap())); + completionSuggestion.addTerm(options); + suggestions.add(completionSuggestion); + Suggest suggest = new Suggest(suggestions); + SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, suggest, null, false, null, 1); + SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, 1, 1, 0, randomLong(), + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + addResponse(searchResponseMerger, searchResponse); + } + awaitResponsesAdded(); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + assertSame(clusters, mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(0, mergedResponse.getFailedShards()); + assertEquals(0, mergedResponse.getShardFailures().length); + Suggest.Suggestion> suggestion = + mergedResponse.getSuggest().getSuggestion(suggestionName); + assertEquals(1, suggestion.getEntries().size()); + Suggest.Suggestion.Entry options = suggestion.getEntries().get(0); + assertEquals(skipDuplicates ? 1 : Math.min(numResponses, size), options.getOptions().size()); + int i = numResponses; + for (Suggest.Suggestion.Entry.Option option : options) { + assertEquals("suggestion", option.getText().string()); + assertEquals(--i, option.getScore(), 0f); + } + } + + public void testMergeAggs() throws InterruptedException { + SearchResponseMerger searchResponseMerger = new SearchResponseMerger(0, 0, 0, new SearchTimeProvider(0, 0, () -> 0), + flag -> new InternalAggregation.ReduceContext(null, null, flag)); + String maxAggName = randomAlphaOfLengthBetween(5, 8); + String rangeAggName = randomAlphaOfLengthBetween(5, 8); + int totalCount = 0; + double maxValue = Double.MIN_VALUE; + for (int i = 0; i < numResponses; i++) { + double value = randomDouble(); + maxValue = Math.max(value, maxValue); + InternalMax max = new InternalMax(maxAggName, value, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap()); + InternalDateRange.Factory factory = new InternalDateRange.Factory(); + int count = randomIntBetween(1, 1000); + totalCount += count; + InternalDateRange.Bucket bucket = factory.createBucket("bucket", 0, 10000, count, InternalAggregations.EMPTY, + false, DocValueFormat.RAW); + InternalDateRange range = factory.create(rangeAggName, Collections.singletonList(bucket), DocValueFormat.RAW, false, + Collections.emptyList(), Collections.emptyMap()); + InternalAggregations aggs = new InternalAggregations(Arrays.asList(range, max)); + SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, aggs, null, null, false, null, 1); + SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, 1, 1, 0, randomLong(), + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + addResponse(searchResponseMerger, searchResponse); + } + awaitResponsesAdded(); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + assertSame(clusters, mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(0, mergedResponse.getFailedShards()); + assertEquals(0, mergedResponse.getShardFailures().length); + assertEquals(0, mergedResponse.getHits().getHits().length); + assertEquals(2, mergedResponse.getAggregations().asList().size()); + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(maxValue, max.getValue(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(totalCount, bucket.getDocCount()); + } + + public void testMergeSearchHits() throws InterruptedException { + final long currentRelativeTime = randomLong(); + final SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime); + final int size = randomIntBetween(0, 100); + final int from = size > 0 ? randomIntBetween(0, 100) : 0; + final int requestedSize = from + size; + final SortField[] sortFields; + final String collapseField; + boolean scoreSort = false; + if (randomBoolean()) { + int numFields = randomIntBetween(1, 3); + sortFields = new SortField[numFields]; + for (int i = 0; i < numFields; i++) { + final SortField sortField; + if (randomBoolean()) { + sortField = new SortField("field-" + i, SortField.Type.INT, randomBoolean()); + } else { + scoreSort = true; + sortField = SortField.FIELD_SCORE; + } + sortFields[i] = sortField; + } + collapseField = randomBoolean() ? "collapse" : null; + } else { + collapseField = null; + sortFields = null; + scoreSort = true; + } + Tuple randomTrackTotalHits = randomTrackTotalHits(); + int trackTotalHitsUpTo = randomTrackTotalHits.v1(); + TotalHits.Relation totalHitsRelation = randomTrackTotalHits.v2(); + + PriorityQueue priorityQueue = new PriorityQueue<>(new SearchHitComparator(sortFields)); + SearchResponseMerger searchResponseMerger = new SearchResponseMerger(from, size, trackTotalHitsUpTo, timeProvider, flag -> null); + + TotalHits expectedTotalHits = null; + int expectedTotal = 0; + int expectedSuccessful = 0; + int expectedSkipped = 0; + int expectedReducePhases = 1; + boolean expectedTimedOut = false; + Boolean expectedTerminatedEarly = null; + float expectedMaxScore = Float.NEGATIVE_INFINITY; + int numIndices = requestedSize == 0 ? 0 : randomIntBetween(1, requestedSize); + Iterator> indicesIterator = randomRealisticIndices(numIndices, numResponses).entrySet().iterator(); + for (int i = 0; i < numResponses; i++) { + Map.Entry entry = indicesIterator.next(); + String clusterAlias = entry.getKey().equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) ? null : entry.getKey(); + Index[] indices = entry.getValue(); + int total = randomIntBetween(1, 1000); + expectedTotal += total; + int successful = randomIntBetween(1, total); + expectedSuccessful += successful; + int skipped = randomIntBetween(1, total); + expectedSkipped += skipped; + + TotalHits totalHits = null; + if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { + totalHits = new TotalHits(randomLongBetween(0, 1000), totalHitsRelation); + long previousValue = expectedTotalHits == null ? 0 : expectedTotalHits.value; + expectedTotalHits = new TotalHits(Math.min(previousValue + totalHits.value, trackTotalHitsUpTo), totalHitsRelation); + } + + final int numDocs = totalHits == null || totalHits.value >= requestedSize ? requestedSize : (int) totalHits.value; + int scoreFactor = randomIntBetween(1, numResponses); + float maxScore = scoreSort ? numDocs * scoreFactor : Float.NaN; + SearchHit[] hits = randomSearchHitArray(numDocs, numResponses, clusterAlias, indices, maxScore, scoreFactor, + sortFields, priorityQueue); + expectedMaxScore = Math.max(expectedMaxScore, maxScore); + + Object[] collapseValues = null; + if (collapseField != null) { + collapseValues = new Object[numDocs]; + for (int j = 0; j < numDocs; j++) { + //set different collapse values for each cluster for simplicity + collapseValues[j] = j + 1000 * i; + } + } + + SearchHits searchHits = new SearchHits(hits, totalHits, maxScore == Float.NEGATIVE_INFINITY ? Float.NaN : maxScore, + sortFields, collapseField, collapseValues); + + int numReducePhases = randomIntBetween(1, 5); + expectedReducePhases += numReducePhases; + boolean timedOut = rarely(); + expectedTimedOut = expectedTimedOut || timedOut; + Boolean terminatedEarly = frequently() ? null : true; + expectedTerminatedEarly = expectedTerminatedEarly == null ? terminatedEarly : expectedTerminatedEarly; + + InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + searchHits, null, null, null, timedOut, terminatedEarly, numReducePhases); + + SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, total, successful, skipped, + randomLong(), ShardSearchFailure.EMPTY_ARRAY, SearchResponseTests.randomClusters()); + + addResponse(searchResponseMerger, searchResponse); + } + + awaitResponsesAdded(); + + final SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse searchResponse = searchResponseMerger.getMergedResponse(clusters); + + assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis()); + assertEquals(expectedTotal, searchResponse.getTotalShards()); + assertEquals(expectedSuccessful, searchResponse.getSuccessfulShards()); + assertEquals(expectedSkipped, searchResponse.getSkippedShards()); + assertEquals(0, searchResponse.getShardFailures().length); + assertEquals(expectedReducePhases, searchResponse.getNumReducePhases()); + assertEquals(expectedTimedOut, searchResponse.isTimedOut()); + assertEquals(expectedTerminatedEarly, searchResponse.isTerminatedEarly()); + + assertSame(clusters, searchResponse.getClusters()); + assertNull(searchResponse.getScrollId()); + + SearchHits searchHits = searchResponse.getHits(); + assertArrayEquals(sortFields, searchHits.getSortFields()); + assertEquals(collapseField, searchHits.getCollapseField()); + if (expectedTotalHits == null) { + assertNull(searchHits.getTotalHits()); + } else { + assertNotNull(searchHits.getTotalHits()); + assertEquals(expectedTotalHits.value, searchHits.getTotalHits().value); + assertSame(expectedTotalHits.relation, searchHits.getTotalHits().relation); + } + if (expectedMaxScore == Float.NEGATIVE_INFINITY) { + assertTrue(Float.isNaN(searchHits.getMaxScore())); + } else { + assertEquals(expectedMaxScore, searchHits.getMaxScore(), 0f); + } + + for (int i = 0; i < from; i++) { + priorityQueue.poll(); + } + SearchHit[] hits = searchHits.getHits(); + if (collapseField != null) { + assertEquals(hits.length, searchHits.getCollapseValues().length); + } else { + assertNull(searchHits.getCollapseValues()); + } + assertThat(hits.length, lessThanOrEqualTo(size)); + for (SearchHit hit : hits) { + SearchHit expected = priorityQueue.poll(); + assertSame(expected, hit); + } + } + + private static Tuple randomTrackTotalHits() { + switch(randomIntBetween(0, 2)) { + case 0: + return Tuple.tuple(SearchContext.TRACK_TOTAL_HITS_DISABLED, null); + case 1: + return Tuple.tuple(randomIntBetween(10, 1000), TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + case 2: + return Tuple.tuple(SearchContext.TRACK_TOTAL_HITS_ACCURATE, TotalHits.Relation.EQUAL_TO); + default: + throw new UnsupportedOperationException(); + } + } + + private static SearchHit[] randomSearchHitArray(int numDocs, int numResponses, String clusterAlias, Index[] indices, float maxScore, + int scoreFactor, SortField[] sortFields, PriorityQueue priorityQueue) { + SearchHit[] hits = new SearchHit[numDocs]; + + int[] sortFieldFactors = new int[sortFields == null ? 0 : sortFields.length]; + for (int j = 0; j < sortFieldFactors.length; j++) { + sortFieldFactors[j] = randomIntBetween(1, numResponses); + } + + for (int j = 0; j < numDocs; j++) { + ShardId shardId = new ShardId(randomFrom(indices), randomIntBetween(0, 10)); + SearchShardTarget shardTarget = new SearchShardTarget(randomAlphaOfLengthBetween(3, 8), shardId, + clusterAlias, OriginalIndices.NONE); + SearchHit hit = new SearchHit(randomIntBetween(0, Integer.MAX_VALUE)); + + float score = Float.NaN; + if (Float.isNaN(maxScore) == false) { + score = (maxScore - j) * scoreFactor; + hit.score(score); + } + + hit.shard(shardTarget); + if (sortFields != null) { + Object[] rawSortValues = new Object[sortFields.length]; + DocValueFormat[] docValueFormats = new DocValueFormat[sortFields.length]; + for (int k = 0; k < sortFields.length; k++) { + SortField sortField = sortFields[k]; + if (sortField == SortField.FIELD_SCORE) { + hit.score(score); + rawSortValues[k] = score; + } else { + rawSortValues[k] = sortField.getReverse() ? numDocs * sortFieldFactors[k] - j : j; + } + docValueFormats[k] = DocValueFormat.RAW; + } + hit.sortValues(rawSortValues, docValueFormats); + } + hits[j] = hit; + priorityQueue.add(hit); + } + return hits; + } + + private static Map randomRealisticIndices(int numIndices, int numClusters) { + String[] indicesNames = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indicesNames[i] = randomAlphaOfLengthBetween(5, 10); + } + Map indicesPerCluster = new TreeMap<>(); + for (int i = 0; i < numClusters; i++) { + Index[] indices = new Index[indicesNames.length]; + for (int j = 0; j < indices.length; j++) { + //Realistically clusters have the same indices with same names, but different uuid + indices[j] = new Index(indicesNames[j], randomAlphaOfLength(10)); + } + String clusterAlias; + if (frequently() || indicesPerCluster.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + clusterAlias = randomAlphaOfLengthBetween(5, 10); + } else { + clusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + } + indicesPerCluster.put(clusterAlias, indices); + } + return indicesPerCluster; + } + + private static final class SearchHitComparator implements Comparator { + + private final SortField[] sortFields; + + SearchHitComparator(SortField[] sortFields) { + this.sortFields = sortFields; + } + + @Override + public int compare(SearchHit a, SearchHit b) { + if (sortFields == null) { + int scoreCompare = Float.compare(b.getScore(), a.getScore()); + if (scoreCompare != 0) { + return scoreCompare; + } + } else { + for (int i = 0; i < sortFields.length; i++) { + SortField sortField = sortFields[i]; + if (sortField == SortField.FIELD_SCORE) { + int scoreCompare = Float.compare(b.getScore(), a.getScore()); + if (scoreCompare != 0) { + return scoreCompare; + } + } else { + Integer aSortValue = (Integer)a.getRawSortValues()[i]; + Integer bSortValue = (Integer)b.getRawSortValues()[i]; + final int compare; + if (sortField.getReverse()) { + compare = Integer.compare(bSortValue, aSortValue); + } else { + compare = Integer.compare(aSortValue, bSortValue); + } + if (compare != 0) { + return compare; + } + } + } + } + int shardIdCompareTo = a.getShard().getShardId().compareTo(b.getShard().getShardId()); + if (shardIdCompareTo != 0) { + return shardIdCompareTo; + } + return Integer.compare(a.docId(), b.docId()); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index ecd2dc44a702e..f07be38765f66 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -128,7 +128,7 @@ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... sha shardSearchFailures, randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY); } - private static SearchResponse.Clusters randomClusters() { + static SearchResponse.Clusters randomClusters() { int totalClusters = randomIntBetween(0, 10); int successfulClusters = randomIntBetween(0, totalClusters); int skippedClusters = totalClusters - successfulClusters; diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 16ff4389d7c4a..1b99beee65e81 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; @@ -38,13 +39,19 @@ import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.NodeDisconnectedException; +import org.elasticsearch.transport.RemoteClusterConnectionTests; import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.RemoteClusterServiceTests; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; @@ -53,13 +60,22 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.Function; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.startsWith; public class TransportSearchActionTests extends ESTestCase { @@ -304,41 +320,169 @@ public void close() { } } - public void testBuildClusters() { - OriginalIndices localIndices = randomBoolean() ? null : randomOriginalIndices(); - Map remoteIndices = new HashMap<>(); - Map searchShardsResponses = new HashMap<>(); - int numRemoteClusters = randomIntBetween(0, 10); - boolean onlySuccessful = randomBoolean(); - int localClusters = localIndices == null ? 0 : 1; - int total = numRemoteClusters + localClusters; - int successful = localClusters; - int skipped = 0; - for (int i = 0; i < numRemoteClusters; i++) { - String cluster = randomAlphaOfLengthBetween(5, 10); - remoteIndices.put(cluster, randomOriginalIndices()); - if (onlySuccessful || randomBoolean()) { - //whatever response counts as successful as long as it's not the empty placeholder - searchShardsResponses.put(cluster, new ClusterSearchShardsResponse(null, null, null)); - successful++; - } else { - searchShardsResponses.put(cluster, ClusterSearchShardsResponse.EMPTY); - skipped++; - } - } - SearchResponse.Clusters clusters = TransportSearchAction.buildClusters(localIndices, remoteIndices, searchShardsResponses); - assertEquals(total, clusters.getTotal()); - assertEquals(successful, clusters.getSuccessful()); - assertEquals(skipped, clusters.getSkipped()); + private MockTransportService startTransport(String id, List knownNodes) { + return RemoteClusterConnectionTests.startTransport(id, knownNodes, Version.CURRENT, threadPool); } - private static OriginalIndices randomOriginalIndices() { - int numLocalIndices = randomIntBetween(0, 5); - String[] localIndices = new String[numLocalIndices]; - for (int i = 0; i < numLocalIndices; i++) { - localIndices[i] = randomAlphaOfLengthBetween(3, 10); + public void testCollectSearchShards() throws Exception { + int numClusters = randomIntBetween(2, 10); + MockTransportService[] mockTransportServices = new MockTransportService[numClusters]; + DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; + Map remoteIndicesByCluster = new HashMap<>(); + Settings.Builder builder = Settings.builder(); + for (int i = 0; i < numClusters; i++) { + List knownNodes = new CopyOnWriteArrayList<>(); + MockTransportService remoteSeedTransport = startTransport("node_remote" + i, knownNodes); + mockTransportServices[i] = remoteSeedTransport; + DiscoveryNode remoteSeedNode = remoteSeedTransport.getLocalDiscoNode(); + knownNodes.add(remoteSeedNode); + nodes[i] = remoteSeedNode; + builder.put("cluster.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString()); + remoteIndicesByCluster.put("remote" + i, new OriginalIndices(new String[]{"index"}, IndicesOptions.lenientExpandOpen())); + } + Settings settings = builder.build(); + + try { + try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicReference> response = new AtomicReference<>(); + AtomicInteger skippedClusters = new AtomicInteger(); + TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, skippedClusters, + remoteIndicesByCluster, remoteClusterService, threadPool, + new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)); + awaitLatch(latch, 5, TimeUnit.SECONDS); + assertEquals(0, skippedClusters.get()); + assertNotNull(response.get()); + Map map = response.get(); + assertEquals(numClusters, map.size()); + for (int i = 0; i < numClusters; i++) { + String clusterAlias = "remote" + i; + assertTrue(map.containsKey(clusterAlias)); + ClusterSearchShardsResponse shardsResponse = map.get(clusterAlias); + assertEquals(1, shardsResponse.getNodes().length); + } + } + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicReference failure = new AtomicReference<>(); + AtomicInteger skippedClusters = new AtomicInteger(0); + TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), "index_not_found", null, skippedClusters, + remoteIndicesByCluster, remoteClusterService, threadPool, + new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch)); + awaitLatch(latch, 5, TimeUnit.SECONDS); + assertEquals(0, skippedClusters.get()); + assertNotNull(failure.get()); + assertThat(failure.get(), instanceOf(RemoteTransportException.class)); + RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); + assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); + } + + int numDisconnectedClusters = randomIntBetween(1, numClusters); + Set disconnectedNodes = new HashSet<>(numDisconnectedClusters); + Set disconnectedNodesIndices = new HashSet<>(numDisconnectedClusters); + while (disconnectedNodes.size() < numDisconnectedClusters) { + int i = randomIntBetween(0, numClusters - 1); + if (disconnectedNodes.add(nodes[i])) { + assertTrue(disconnectedNodesIndices.add(i)); + } + } + + CountDownLatch disconnectedLatch = new CountDownLatch(numDisconnectedClusters); + RemoteClusterServiceTests.addConnectionListener(remoteClusterService, new TransportConnectionListener() { + @Override + public void onNodeDisconnected(DiscoveryNode node) { + if (disconnectedNodes.remove(node)) { + disconnectedLatch.countDown(); + } + } + }); + for (DiscoveryNode disconnectedNode : disconnectedNodes) { + service.addFailToSendNoConnectRule(disconnectedNode.getAddress()); + } + + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicInteger skippedClusters = new AtomicInteger(0); + AtomicReference failure = new AtomicReference<>(); + TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, skippedClusters, + remoteIndicesByCluster, remoteClusterService, threadPool, + new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch)); + awaitLatch(latch, 5, TimeUnit.SECONDS); + assertEquals(0, skippedClusters.get()); + assertNotNull(failure.get()); + assertThat(failure.get(), instanceOf(RemoteTransportException.class)); + assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster [")); + assertThat(failure.get().getCause(), instanceOf(NodeDisconnectedException.class)); + } + + //setting skip_unavailable to true for all the disconnected clusters will make the request succeed again + for (int i : disconnectedNodesIndices) { + RemoteClusterServiceTests.updateSkipUnavailable(remoteClusterService, "remote" + i, true); + } + + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicInteger skippedClusters = new AtomicInteger(0); + AtomicReference> response = new AtomicReference<>(); + TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, skippedClusters, + remoteIndicesByCluster, remoteClusterService, threadPool, + new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)); + awaitLatch(latch, 5, TimeUnit.SECONDS); + assertNotNull(response.get()); + Map map = response.get(); + assertEquals(numClusters - disconnectedNodesIndices.size(), map.size()); + assertEquals(skippedClusters.get(), disconnectedNodesIndices.size()); + for (int i = 0; i < numClusters; i++) { + String clusterAlias = "remote" + i; + if (disconnectedNodesIndices.contains(i)) { + assertFalse(map.containsKey(clusterAlias)); + } else { + assertNotNull(map.get(clusterAlias)); + } + } + } + + //give transport service enough time to realize that the node is down, and to notify the connection listeners + //so that RemoteClusterConnection is left with no connected nodes, hence it will retry connecting next + assertTrue(disconnectedLatch.await(5, TimeUnit.SECONDS)); + + service.clearAllRules(); + if (randomBoolean()) { + for (int i : disconnectedNodesIndices) { + if (randomBoolean()) { + RemoteClusterServiceTests.updateSkipUnavailable(remoteClusterService, "remote" + i, true); + } + + } + } + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicInteger skippedClusters = new AtomicInteger(0); + AtomicReference> response = new AtomicReference<>(); + TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, skippedClusters, + remoteIndicesByCluster, remoteClusterService, threadPool, + new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)); + awaitLatch(latch, 5, TimeUnit.SECONDS); + assertEquals(0, skippedClusters.get()); + assertNotNull(response.get()); + Map map = response.get(); + assertEquals(numClusters, map.size()); + for (int i = 0; i < numClusters; i++) { + String clusterAlias = "remote" + i; + assertTrue(map.containsKey(clusterAlias)); + assertNotNull(map.get(clusterAlias)); + } + } + assertEquals(0, service.getConnectionManager().size()); + } + } finally { + for (MockTransportService mockTransportService : mockTransportServices) { + mockTransportService.close(); + } } - return new OriginalIndices(localIndices, IndicesOptions.fromOptions(randomBoolean(), - randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 14919b7e9f0bc..8025d588820f9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -66,6 +66,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateExists; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; @@ -118,6 +119,23 @@ public void testMetadata() throws Exception { assertThat(clusterStateResponse.getState().metaData().indices().size(), is(0)); } + public void testMetadataVersion() { + createIndex("index-1"); + createIndex("index-2"); + long metadataVersion = client().admin().cluster().prepareState().get().getState().metaData().version(); + assertThat(metadataVersion, greaterThan(0L)); + assertThat(client().admin().cluster().prepareState().setIndices("index-1").get().getState().metaData().version(), + equalTo(metadataVersion)); + assertThat(client().admin().cluster().prepareState().setIndices("index-2").get().getState().metaData().version(), + equalTo(metadataVersion)); + assertThat(client().admin().cluster().prepareState().setIndices("*").get().getState().metaData().version(), + equalTo(metadataVersion)); + assertThat(client().admin().cluster().prepareState().setIndices("not-found").get().getState().metaData().version(), + equalTo(metadataVersion)); + assertThat(client().admin().cluster().prepareState().clear().setMetaData(false).get().getState().metaData().version(), + equalTo(0L)); + } + public void testIndexTemplates() throws Exception { client().admin().indices().preparePutTemplate("foo_template") .setPatterns(Collections.singletonList("te*")) diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 3d945b7a7bb68..aaef1e58fb50e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -20,6 +20,9 @@ package org.elasticsearch.cluster; import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; +import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.MasterNotDiscoveredException; @@ -28,10 +31,12 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -45,10 +50,22 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // does unsafe things .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 1).build(); } + @Override + protected List addExtraClusterBootstrapSettings(List allNodesSettings) { + // if it's the first master in the cluster bootstrap the cluster with this node name + Settings settings = allNodesSettings.get(0); + if (internalCluster().numMasterNodes() == 0 && settings.getAsBoolean(Node.NODE_MASTER_SETTING.getKey(), false)) { + return Collections.singletonList(Settings.builder() + .put(settings) + .put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), settings.get(Node.NODE_NAME_SETTING.getKey())) + .build()); + } + return allNodesSettings; + } + public void testSimpleOnlyMasterNodeElection() throws IOException { logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) @@ -89,7 +106,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } - public void testElectOnlyBetweenMasterNodes() throws IOException { + public void testElectOnlyBetweenMasterNodes() throws IOException, ExecutionException, InterruptedException { logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) .put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); @@ -119,6 +136,8 @@ public void testElectOnlyBetweenMasterNodes() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> closing master node (1)"); + client().execute(AddVotingConfigExclusionsAction.INSTANCE, + new AddVotingConfigExclusionsRequest(new String[]{masterNodeName})).get(); internalCluster().stopCurrentMasterNode(); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); @@ -126,7 +145,7 @@ public void testElectOnlyBetweenMasterNodes() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); } - public void testAliasFilterValidation() throws Exception { + public void testAliasFilterValidation() { logger.info("--> start master node / non data"); internalCluster().startNode(Settings.builder() .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java index 542247c058861..c9ebdf278c71d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -20,49 +20,52 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterAction; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterResponse; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesAction; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.Settings.Builder; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransport; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; import org.junit.Before; -import java.util.Set; +import java.util.Collections; +import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING; import static org.elasticsearch.common.settings.Settings.builder; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; public class ClusterBootstrapServiceTests extends ESTestCase { private DiscoveryNode localNode, otherNode1, otherNode2; private DeterministicTaskQueue deterministicTaskQueue; private TransportService transportService; - private ClusterBootstrapService clusterBootstrapService; @Before public void createServices() { @@ -81,10 +84,6 @@ protected void onSendRequest(long requestId, String action, TransportRequest req transportService = transport.createTransportService(Settings.EMPTY, deterministicTaskQueue.getThreadPool(), TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, emptySet()); - - clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), - localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), - transportService); } private DiscoveryNode newDiscoveryNode(String nodeName) { @@ -92,152 +91,404 @@ private DiscoveryNode newDiscoveryNode(String nodeName) { Version.CURRENT); } - private void startServices() { - transportService.start(); - transportService.acceptIncomingRequests(); - clusterBootstrapService.start(); - } + public void testBootstrapsAutomaticallyWithDefaultConfiguration() { + final Settings.Builder settings = Settings.builder(); + final long timeout; + if (randomBoolean()) { + timeout = UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(Settings.EMPTY).millis(); + } else { + timeout = randomLongBetween(1, 10000); + settings.put(UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.getKey(), timeout + "ms"); + } - public void testDoesNothingOnNonMasterNodes() { - localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> { - throw new AssertionError("should not make a discovery request"); - }); + final AtomicReference>> discoveredNodesSupplier = new AtomicReference<>(() -> { + throw new AssertionError("should not be called yet"); + }); - startServices(); - deterministicTaskQueue.runAllTasks(); + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService + = new ClusterBootstrapService(settings.build(), transportService, () -> discoveredNodesSupplier.get().get(), + () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), + equalTo(Stream.of(localNode, otherNode1, otherNode2).map(DiscoveryNode::getId).collect(Collectors.toSet()))); + assertThat(deterministicTaskQueue.getCurrentTimeMillis(), greaterThanOrEqualTo(timeout)); + }); + + deterministicTaskQueue.scheduleAt(timeout - 1, + () -> discoveredNodesSupplier.set(() -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet()))); + + transportService.start(); + clusterBootstrapService.scheduleUnconfiguredBootstrap(); + deterministicTaskQueue.runAllTasksInTimeOrder(); + assertTrue(bootstrapped.get()); } public void testDoesNothingByDefaultIfHostsProviderConfigured() { - testConfiguredIfSettingSet(builder().putList(DISCOVERY_HOSTS_PROVIDER_SETTING.getKey())); + testDoesNothingWithSettings(builder().putList(DISCOVERY_HOSTS_PROVIDER_SETTING.getKey())); } public void testDoesNothingByDefaultIfUnicastHostsConfigured() { - testConfiguredIfSettingSet(builder().putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey())); + testDoesNothingWithSettings(builder().putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey())); } public void testDoesNothingByDefaultIfMasterNodesConfigured() { - testConfiguredIfSettingSet(builder().putList(INITIAL_MASTER_NODES_SETTING.getKey())); + testDoesNothingWithSettings(builder().putList(INITIAL_MASTER_NODES_SETTING.getKey())); } - private void testConfiguredIfSettingSet(Builder builder) { - clusterBootstrapService = new ClusterBootstrapService(builder.build(), transportService); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> { - throw new AssertionError("should not make a discovery request"); - }); - startServices(); + public void testDoesNothingByDefaultOnMasterIneligibleNodes() { + localNode = new DiscoveryNode("local", randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), emptySet(), + Version.CURRENT); + testDoesNothingWithSettings(Settings.builder()); + } + + private void testDoesNothingWithSettings(Settings.Builder builder) { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(builder.build(), transportService, () -> { + throw new AssertionError("should not be called"); + }, () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + transportService.start(); + clusterBootstrapService.scheduleUnconfiguredBootstrap(); deterministicTaskQueue.runAllTasks(); } - public void testBootstrapsAutomaticallyWithDefaultConfiguration() { - clusterBootstrapService = new ClusterBootstrapService(Settings.EMPTY, transportService); + public void testDoesNothingByDefaultIfZen1NodesDiscovered() { + final DiscoveryNode zen1Node = new DiscoveryNode("zen1", buildNewFakeTransportAddress(), singletonMap("zen1", "true"), + singleton(Role.MASTER), Version.CURRENT); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.EMPTY, transportService, () -> + Stream.of(localNode, zen1Node).collect(Collectors.toSet()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + transportService.start(); + clusterBootstrapService.scheduleUnconfiguredBootstrap(); + deterministicTaskQueue.runAllTasks(); + } - final Set discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet()); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes))); + public void testThrowsExceptionOnDuplicates() { + final IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> { + new ClusterBootstrapService(builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), "duplicate-requirement", "duplicate-requirement").build(), + transportService, Collections::emptyList, () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + }); + + assertThat(illegalArgumentException.getMessage(), containsString(INITIAL_MASTER_NODES_SETTING.getKey())); + assertThat(illegalArgumentException.getMessage(), containsString("duplicate-requirement")); + } + + public void testBootstrapsOnDiscoveryOfAllRequiredNodes() { final AtomicBoolean bootstrapped = new AtomicBoolean(); - transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new, - (request, channel, task) -> { - assertThat(request.getBootstrapConfiguration().getNodeDescriptions().stream() - .map(NodeDescription::getId).collect(Collectors.toSet()), - equalTo(discoveredNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet()))); - channel.sendResponse(new BootstrapClusterResponse(randomBoolean())); - assertTrue(bootstrapped.compareAndSet(false, true)); - }); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), containsInAnyOrder(localNode.getId(), otherNode1.getId(), otherNode2.getId())); + assertThat(vc.getNodeIds(), not(hasItem(containsString("placeholder")))); + }); - startServices(); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + + bootstrapped.set(false); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertFalse(bootstrapped.get()); // should only bootstrap once + } + public void testBootstrapsOnDiscoveryOfTwoOfThreeRequiredNodes() { + final AtomicBoolean bootstrapped = new AtomicBoolean(); + + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> singletonList(otherNode1), () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), hasSize(3)); + assertThat(vc.getNodeIds(), hasItem(localNode.getId())); + assertThat(vc.getNodeIds(), hasItem(otherNode1.getId())); + assertThat(vc.getNodeIds(), hasItem(allOf(startsWith(BOOTSTRAP_PLACEHOLDER_PREFIX), containsString(otherNode2.getName())))); + assertTrue(vc.hasQuorum(Stream.of(localNode, otherNode1).map(DiscoveryNode::getId).collect(Collectors.toList()))); + assertFalse(vc.hasQuorum(singletonList(localNode.getId()))); + assertFalse(vc.hasQuorum(singletonList(otherNode1.getId()))); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); assertTrue(bootstrapped.get()); + + bootstrapped.set(false); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertFalse(bootstrapped.get()); // should only bootstrap once } - public void testDoesNotRetryOnDiscoveryFailure() { - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - new TransportRequestHandler() { - private boolean called = false; + public void testBootstrapsOnDiscoveryOfThreeOfFiveRequiredNodes() { + final AtomicBoolean bootstrapped = new AtomicBoolean(); - @Override - public void messageReceived(GetDiscoveredNodesRequest request, TransportChannel channel, Task task) { - assert called == false; - called = true; - throw new IllegalArgumentException("simulate failure of discovery request"); - } - }); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName(), + "missing-node-1", "missing-node-2").build(), + transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), hasSize(5)); + assertThat(vc.getNodeIds(), hasItem(localNode.getId())); + assertThat(vc.getNodeIds(), hasItem(otherNode1.getId())); + assertThat(vc.getNodeIds(), hasItem(otherNode2.getId())); + + final List placeholders + = vc.getNodeIds().stream().filter(ClusterBootstrapService::isBootstrapPlaceholder).collect(Collectors.toList()); + assertThat(placeholders.size(), equalTo(2)); + assertNotEquals(placeholders.get(0), placeholders.get(1)); + assertThat(placeholders, hasItem(containsString("missing-node-1"))); + assertThat(placeholders, hasItem(containsString("missing-node-2"))); + + assertTrue(vc.hasQuorum(Stream.of(localNode, otherNode1, otherNode2).map(DiscoveryNode::getId).collect(Collectors.toList()))); + assertFalse(vc.hasQuorum(Stream.of(localNode, otherNode1).map(DiscoveryNode::getId).collect(Collectors.toList()))); + assertFalse(vc.hasQuorum(Stream.of(localNode, otherNode1).map(DiscoveryNode::getId).collect(Collectors.toList()))); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); - startServices(); + bootstrapped.set(false); + clusterBootstrapService.onFoundPeersUpdated(); deterministicTaskQueue.runAllTasks(); + assertFalse(bootstrapped.get()); // should only bootstrap once } - public void testBootstrapsOnDiscoverySuccess() { - final AtomicBoolean discoveryAttempted = new AtomicBoolean(); - final Set discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet()); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> { - assertTrue(discoveryAttempted.compareAndSet(false, true)); - channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes)); - }); + public void testDoesNotBootstrapIfNoNodesDiscovered() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, Collections::emptyList, () -> true, vc -> { + throw new AssertionError("should not be called"); + }); - final AtomicBoolean bootstrapAttempted = new AtomicBoolean(); - transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new, - (request, channel, task) -> { - assertTrue(bootstrapAttempted.compareAndSet(false, true)); - channel.sendResponse(new BootstrapClusterResponse(false)); - }); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } - startServices(); + public void testDoesNotBootstrapIfTwoOfFiveNodesDiscovered() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), + localNode.getName(), otherNode1.getName(), otherNode2.getName(), "not-a-node-1", "not-a-node-2").build(), + transportService, () -> Stream.of(otherNode1).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); deterministicTaskQueue.runAllTasks(); + } + + public void testDoesNotBootstrapIfThreeOfSixNodesDiscovered() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), + localNode.getName(), otherNode1.getName(), otherNode2.getName(), "not-a-node-1", "not-a-node-2", "not-a-node-3").build(), + transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); - assertTrue(discoveryAttempted.get()); - assertTrue(bootstrapAttempted.get()); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); } - public void testRetriesOnBootstrapFailure() { - final Set discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet()); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes))); + public void testDoesNotBootstrapIfAlreadyBootstrapped() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> true, vc -> { + throw new AssertionError("should not be called"); + }); - AtomicLong callCount = new AtomicLong(); - transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new, - (request, channel, task) -> { - callCount.incrementAndGet(); - channel.sendResponse(new ElasticsearchException("simulated exception")); - }); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } - startServices(); - while (callCount.get() < 5) { - if (deterministicTaskQueue.hasDeferredTasks()) { - deterministicTaskQueue.advanceTime(); - } - deterministicTaskQueue.runAllRunnableTasks(); - } + public void testDoesNotBootstrapsOnNonMasterNode() { + localNode = new DiscoveryNode("local", randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), emptySet(), + Version.CURRENT); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> + Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); } - public void testStopsRetryingBootstrapWhenStopped() { - final Set discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet()); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes))); + public void testDoesNotBootstrapsIfLocalNodeNotInInitialMasterNodes() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> + Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } - transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new, - (request, channel, task) -> channel.sendResponse(new ElasticsearchException("simulated exception"))); + public void testDoesNotBootstrapsIfNotConfigured() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey()).build(), transportService, + () -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + transportService.start(); + clusterBootstrapService.scheduleUnconfiguredBootstrap(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } - deterministicTaskQueue.scheduleAt(deterministicTaskQueue.getCurrentTimeMillis() + 200000, new Runnable() { - @Override - public void run() { - clusterBootstrapService.stop(); - } + public void testDoesNotBootstrapsIfZen1NodesDiscovered() { + final DiscoveryNode zen1Node = new DiscoveryNode("zen1", buildNewFakeTransportAddress(), singletonMap("zen1", "true"), + singleton(Role.MASTER), Version.CURRENT); - @Override - public String toString() { - return "stop cluster bootstrap service"; + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2, zen1Node).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } + + public void testRetriesBootstrappingOnException() { + + final AtomicLong bootstrappingAttempts = new AtomicLong(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + bootstrappingAttempts.incrementAndGet(); + if (bootstrappingAttempts.get() < 5L) { + throw new ElasticsearchException("test"); } }); - startServices(); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertThat(bootstrappingAttempts.get(), greaterThanOrEqualTo(5L)); + assertThat(deterministicTaskQueue.getCurrentTimeMillis(), greaterThanOrEqualTo(40000L)); + } + + public void testCancelsBootstrapIfRequirementMatchesMultipleNodes() { + AtomicReference> discoveredNodes + = new AtomicReference<>(Stream.of(otherNode1, otherNode2).collect(Collectors.toList())); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getAddress().getAddress()).build(), + transportService, discoveredNodes::get, () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); deterministicTaskQueue.runAllTasks(); - // termination means success + + discoveredNodes.set(emptyList()); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } + + public void testCancelsBootstrapIfNodeMatchesMultipleRequirements() { + AtomicReference> discoveredNodes + = new AtomicReference<>(Stream.of(otherNode1, otherNode2).collect(Collectors.toList())); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), otherNode1.getAddress().toString(), otherNode1.getName()) + .build(), + transportService, discoveredNodes::get, () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + + discoveredNodes.set(Stream.of(new DiscoveryNode(otherNode1.getName(), randomAlphaOfLength(10), buildNewFakeTransportAddress(), + emptyMap(), singleton(Role.MASTER), Version.CURRENT), + new DiscoveryNode("yet-another-node", randomAlphaOfLength(10), otherNode1.getAddress(), emptyMap(), singleton(Role.MASTER), + Version.CURRENT)).collect(Collectors.toList())); + + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } + + public void testMatchesOnNodeName() { + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName()).build(), transportService, + Collections::emptyList, () -> false, vc -> assertTrue(bootstrapped.compareAndSet(false, true))); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + } + + public void testMatchesOnNodeAddress() { + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getAddress().toString()).build(), transportService, + Collections::emptyList, () -> false, vc -> assertTrue(bootstrapped.compareAndSet(false, true))); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + } + + public void testMatchesOnNodeHostAddress() { + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getAddress().getAddress()).build(), + transportService, Collections::emptyList, () -> false, vc -> assertTrue(bootstrapped.compareAndSet(false, true))); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + } + + public void testDoesNotJustMatchEverything() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), randomAlphaOfLength(10)).build(), transportService, + Collections::emptyList, () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } + + public void testDoesNotIncludeExtraNodes() { + final DiscoveryNode extraNode = newDiscoveryNode("extra-node"); + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2, extraNode).collect(Collectors.toList()), () -> false, + vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), not(hasItem(extraNode.getId()))); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 129b29e1f21e5..cf8e1737a7708 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -38,6 +38,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singletonList; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.equalTo; @@ -245,6 +246,13 @@ public void testDescriptionAfterBootstrapping() { "discovery will continue using [] from hosts providers and [" + localNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", BOOTSTRAP_PLACEHOLDER_PREFIX + "n3"), + emptyList(), emptyList(), 0L).getDescription(), + is("master not discovered or elected yet, an election requires 2 nodes with ids [n1, n2], " + + "have discovered [] which is not a quorum; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", "n4"), emptyList(), emptyList(), 0L) .getDescription(), is("master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + @@ -259,6 +267,20 @@ public void testDescriptionAfterBootstrapping() { "discovery will continue using [] from hosts providers and [" + localNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), + emptyList(), emptyList(), 0L).getDescription(), + is("master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + + "have discovered [] which is not a quorum; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", + BOOTSTRAP_PLACEHOLDER_PREFIX + "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), emptyList(), emptyList(), 0L).getDescription(), + is("master not discovered or elected yet, an election requires 3 nodes with ids [n1, n2, n3], " + + "have discovered [] which is not a quorum; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, new String[]{"n1"}, new String[]{"n1"}), emptyList(), emptyList(), 0L).getDescription(), is("master not discovered or elected yet, an election requires a node with id [n1], " + diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 96746f3343e0a..7db63ab120e91 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -19,14 +19,12 @@ package org.elasticsearch.cluster.coordination; import com.carrotsearch.randomizedtesting.RandomizedContext; - import org.apache.logging.log4j.CloseableThreadContext; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -48,13 +46,13 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.discovery.zen.PublishClusterStateStats; import org.elasticsearch.discovery.zen.UnicastHostsProvider.HostsResolver; import org.elasticsearch.env.NodeEnvironment; @@ -82,22 +80,23 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import java.util.function.UnaryOperator; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptySet; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.clusterState; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.setValue; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.value; -import static org.elasticsearch.cluster.coordination.Coordinator.PUBLISH_TIMEOUT_SETTING; import static org.elasticsearch.cluster.coordination.Coordinator.Mode.CANDIDATE; import static org.elasticsearch.cluster.coordination.Coordinator.Mode.FOLLOWER; import static org.elasticsearch.cluster.coordination.Coordinator.Mode.LEADER; +import static org.elasticsearch.cluster.coordination.Coordinator.PUBLISH_TIMEOUT_SETTING; import static org.elasticsearch.cluster.coordination.CoordinatorTests.Cluster.DEFAULT_DELAY_VARIABILITY; import static org.elasticsearch.cluster.coordination.ElectionSchedulerFactory.ELECTION_BACK_OFF_TIME_SETTING; import static org.elasticsearch.cluster.coordination.ElectionSchedulerFactory.ELECTION_DURATION_SETTING; @@ -118,14 +117,12 @@ import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -726,70 +723,6 @@ public void testAckListenerReceivesNacksFromFollowerInHigherTerm() { // assertTrue("expected ack from " + follower1, ackCollector.hasAckedSuccessfully(follower1)); } - public void testDiscoveryOfPeersTriggersNotification() { - final Cluster cluster = new Cluster(randomIntBetween(2, 5)); - - // register a listener and then deregister it again to show that it is not called after deregistration - try (Releasable ignored = cluster.getAnyNode().coordinator.withDiscoveryListener(ActionListener.wrap(() -> { - throw new AssertionError("should not be called"); - }))) { - // do nothing - } - - final long startTimeMillis = cluster.deterministicTaskQueue.getCurrentTimeMillis(); - final ClusterNode bootstrapNode = cluster.getAnyNode(); - final AtomicBoolean hasDiscoveredAllPeers = new AtomicBoolean(); - assertFalse(bootstrapNode.coordinator.getFoundPeers().iterator().hasNext()); - try (Releasable ignored = bootstrapNode.coordinator.withDiscoveryListener( - new ActionListener>() { - @Override - public void onResponse(Iterable discoveryNodes) { - int peerCount = 0; - for (final DiscoveryNode discoveryNode : discoveryNodes) { - peerCount++; - } - assertThat(peerCount, lessThan(cluster.size())); - if (peerCount == cluster.size() - 1 && hasDiscoveredAllPeers.get() == false) { - hasDiscoveredAllPeers.set(true); - final long elapsedTimeMillis = cluster.deterministicTaskQueue.getCurrentTimeMillis() - startTimeMillis; - logger.info("--> {} discovered {} peers in {}ms", bootstrapNode.getId(), cluster.size() - 1, elapsedTimeMillis); - assertThat(elapsedTimeMillis, lessThanOrEqualTo(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2)); - } - } - - @Override - public void onFailure(Exception e) { - throw new AssertionError("unexpected", e); - } - })) { - cluster.runFor(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2 + randomLongBetween(0, 60000), "discovery phase"); - } - - assertTrue(hasDiscoveredAllPeers.get()); - - final AtomicBoolean receivedAlreadyBootstrappedException = new AtomicBoolean(); - try (Releasable ignored = bootstrapNode.coordinator.withDiscoveryListener( - new ActionListener>() { - @Override - public void onResponse(Iterable discoveryNodes) { - // ignore - } - - @Override - public void onFailure(Exception e) { - if (e instanceof ClusterAlreadyBootstrappedException) { - receivedAlreadyBootstrappedException.set(true); - } else { - throw new AssertionError("unexpected", e); - } - } - })) { - - cluster.stabilise(); - } - assertTrue(receivedAlreadyBootstrappedException.get()); - } - public void testSettingInitialConfigurationTriggersElection() { final Cluster cluster = new Cluster(randomIntBetween(1, 5)); cluster.runFor(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2 + randomLongBetween(0, 60000), "initial discovery phase"); @@ -811,7 +744,7 @@ public void testSettingInitialConfigurationTriggersElection() { assertThat(nodeId + " should have found all peers", foundPeers, hasSize(cluster.size())); } - final ClusterNode bootstrapNode = cluster.getAnyNode(); + final ClusterNode bootstrapNode = cluster.getAnyBootstrappableNode(); bootstrapNode.applyInitialConfiguration(); assertTrue(bootstrapNode.getId() + " has been bootstrapped", bootstrapNode.coordinator.isInitialConfigurationSet()); @@ -841,13 +774,13 @@ public void testCannotSetInitialConfigurationTwice() { public void testCannotSetInitialConfigurationWithoutQuorum() { final Cluster cluster = new Cluster(randomIntBetween(1, 5)); final Coordinator coordinator = cluster.getAnyNode().coordinator; - final VotingConfiguration unknownNodeConfiguration = new VotingConfiguration(Collections.singleton("unknown-node")); + final VotingConfiguration unknownNodeConfiguration = new VotingConfiguration( + Sets.newHashSet(coordinator.getLocalNode().getId(), "unknown-node")); final String exceptionMessage = expectThrows(CoordinationStateRejectedException.class, () -> coordinator.setInitialConfiguration(unknownNodeConfiguration)).getMessage(); assertThat(exceptionMessage, startsWith("not enough nodes discovered to form a quorum in the initial configuration [knownNodes=[")); - assertThat(exceptionMessage, - endsWith("], VotingConfiguration{unknown-node}]")); + assertThat(exceptionMessage, containsString("unknown-node")); assertThat(exceptionMessage, containsString(coordinator.getLocalNode().toString())); // This is VERY BAD: setting a _different_ initial configuration. Yet it works if the first attempt will never be a quorum. @@ -855,6 +788,16 @@ public void testCannotSetInitialConfigurationWithoutQuorum() { cluster.stabilise(); } + public void testCannotSetInitialConfigurationWithoutLocalNode() { + final Cluster cluster = new Cluster(randomIntBetween(1, 5)); + final Coordinator coordinator = cluster.getAnyNode().coordinator; + final VotingConfiguration unknownNodeConfiguration = new VotingConfiguration(Sets.newHashSet("unknown-node")); + final String exceptionMessage = expectThrows(CoordinationStateRejectedException.class, + () -> coordinator.setInitialConfiguration(unknownNodeConfiguration)).getMessage(); + assertThat(exceptionMessage, + equalTo("local node is not part of initial configuration")); + } + public void testDiffBasedPublishing() { final Cluster cluster = new Cluster(randomIntBetween(1, 5)); cluster.runRandomly(); @@ -1271,12 +1214,8 @@ public String toString() { } } else if (rarely()) { final ClusterNode clusterNode = getAnyNode(); - clusterNode.onNode( - () -> { - logger.debug("----> [runRandomly {}] applying initial configuration {} to {}", - thisStep, initialConfiguration, clusterNode.getId()); - clusterNode.coordinator.setInitialConfiguration(initialConfiguration); - }).run(); + logger.debug("----> [runRandomly {}] applying initial configuration on {}", step, clusterNode.getId()); + clusterNode.applyInitialConfiguration(); } else { if (deterministicTaskQueue.hasDeferredTasks() && randomBoolean()) { deterministicTaskQueue.advanceTime(); @@ -1401,7 +1340,7 @@ void bootstrapIfNecessary() { assertThat("setting initial configuration may fail with disconnected nodes", disconnectedNodes, empty()); assertThat("setting initial configuration may fail with blackholed nodes", blackholedNodes, empty()); runFor(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2, "discovery prior to setting initial configuration"); - final ClusterNode bootstrapNode = getAnyMasterEligibleNode(); + final ClusterNode bootstrapNode = getAnyBootstrappableNode(); bootstrapNode.applyInitialConfiguration(); } else { logger.info("setting initial configuration not required"); @@ -1472,8 +1411,10 @@ boolean nodeExists(DiscoveryNode node) { return clusterNodes.stream().anyMatch(cn -> cn.getLocalNode().equals(node)); } - ClusterNode getAnyMasterEligibleNode() { - return randomFrom(clusterNodes.stream().filter(n -> n.getLocalNode().isMasterNode()).collect(Collectors.toList())); + ClusterNode getAnyBootstrappableNode() { + return randomFrom(clusterNodes.stream().filter(n -> n.getLocalNode().isMasterNode()) + .filter(n -> initialConfiguration.getNodeIds().contains(n.getLocalNode().getId())) + .collect(Collectors.toList())); } ClusterNode getAnyNode() { @@ -1803,11 +1744,24 @@ ClusterState getLastAppliedClusterState() { void applyInitialConfiguration() { onNode(() -> { + final Set nodeIdsWithPlaceholders = new HashSet<>(initialConfiguration.getNodeIds()); + Stream.generate(() -> BOOTSTRAP_PLACEHOLDER_PREFIX + UUIDs.randomBase64UUID(random())) + .limit((Math.max(initialConfiguration.getNodeIds().size(), 2) - 1) / 2) + .forEach(nodeIdsWithPlaceholders::add); + final Set nodeIds = new HashSet<>( + randomSubsetOf(initialConfiguration.getNodeIds().size(), nodeIdsWithPlaceholders)); + // initial configuration should not have a place holder for local node + if (initialConfiguration.getNodeIds().contains(localNode.getId()) && nodeIds.contains(localNode.getId()) == false) { + nodeIds.remove(nodeIds.iterator().next()); + nodeIds.add(localNode.getId()); + } + final VotingConfiguration configurationWithPlaceholders = new VotingConfiguration(nodeIds); try { - coordinator.setInitialConfiguration(initialConfiguration); - logger.info("successfully set initial configuration to {}", initialConfiguration); + coordinator.setInitialConfiguration(configurationWithPlaceholders); + logger.info("successfully set initial configuration to {}", configurationWithPlaceholders); } catch (CoordinationStateRejectedException e) { - logger.info(new ParameterizedMessage("failed to set initial configuration to {}", initialConfiguration), e); + logger.info(new ParameterizedMessage("failed to set initial configuration to {}", + configurationWithPlaceholders), e); } }).run(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java index 6d985d46ef6e4..4f1016847c887 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java @@ -31,7 +31,9 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.elasticsearch.test.EqualsHashCodeTestUtils.CopyFunction; +import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.test.transport.MockTransport; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportException; @@ -41,12 +43,21 @@ import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptySet; import static org.elasticsearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_ACTION_NAME; @@ -62,6 +73,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsInstanceOf.instanceOf; +import static org.mockito.Mockito.mock; public class FollowersCheckerTests extends ESTestCase { @@ -536,6 +548,46 @@ public String executor() { } } + private void testPreferMasterNodes() { + List nodes = randomNodes(10); + DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder(); + nodes.forEach(dn -> discoNodesBuilder.add(dn)); + DiscoveryNodes discoveryNodes = discoNodesBuilder.localNodeId(nodes.get(0).getId()).build(); + CapturingTransport capturingTransport = new CapturingTransport(); + TransportService transportService = capturingTransport.createTransportService(Settings.EMPTY, mock(ThreadPool.class), + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> nodes.get(0), null, emptySet()); + final FollowersChecker followersChecker = new FollowersChecker(Settings.EMPTY, transportService, fcr -> { + assert false : fcr; + }, (node, reason) -> { + assert false : node; + }); + followersChecker.setCurrentNodes(discoveryNodes); + List followerTargets = Stream.of(capturingTransport.getCapturedRequestsAndClear()) + .map(cr -> cr.node).collect(Collectors.toList()); + List sortedFollowerTargets = new ArrayList<>(followerTargets); + Collections.sort(sortedFollowerTargets, Comparator.comparing(n -> n.isMasterNode() == false)); + assertEquals(sortedFollowerTargets, followerTargets); + } + + private static List randomNodes(final int numNodes) { + List nodesList = new ArrayList<>(); + for (int i = 0; i < numNodes; i++) { + Map attributes = new HashMap<>(); + if (frequently()) { + attributes.put("custom", randomBoolean() ? "match" : randomAlphaOfLengthBetween(3, 5)); + } + final DiscoveryNode node = newNode(i, attributes, + new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values())))); + nodesList.add(node); + } + return nodesList; + } + + private static DiscoveryNode newNode(int nodeId, Map attributes, Set roles) { + return new DiscoveryNode("name_" + nodeId, "node_" + nodeId, buildNewFakeTransportAddress(), attributes, roles, + Version.CURRENT); + } + private static class ExpectsSuccess implements TransportResponseHandler { private final AtomicBoolean responseReceived = new AtomicBoolean(); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutorTests.java index 65df746789c89..e63e4e01196fb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutorTests.java @@ -70,7 +70,7 @@ public void testRemovingNonExistentNodes() throws Exception { public void testRerouteAfterRemovingNodes() throws Exception { final AllocationService allocationService = mock(AllocationService.class); - when(allocationService.deassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class))) + when(allocationService.disassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class))) .thenAnswer(im -> im.getArguments()[0]); final AtomicReference remainingNodesClusterState = new AtomicReference<>(); @@ -102,7 +102,7 @@ protected ClusterState remainingNodesClusterState(ClusterState currentState, final ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(clusterState, tasks); - verify(allocationService).deassociateDeadNodes(eq(remainingNodesClusterState.get()), eq(true), any(String.class)); + verify(allocationService).disassociateDeadNodes(eq(remainingNodesClusterState.get()), eq(true), any(String.class)); for (final NodeRemovalClusterStateTaskExecutor.Task task : tasks) { assertNull(result.resultingState.nodes().get(task.node().getId())); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java index 4435073d95363..658250bc7a4da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.coordination; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; @@ -33,10 +34,13 @@ import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -97,8 +101,8 @@ abstract class MockPublication extends Publication { boolean committed; - Map> pendingPublications = new HashMap<>(); - Map> pendingCommits = new HashMap<>(); + Map> pendingPublications = new LinkedHashMap<>(); + Map> pendingCommits = new LinkedHashMap<>(); Map joins = new HashMap<>(); Set missingJoins = new HashSet<>(); @@ -346,7 +350,7 @@ public void testClusterStatePublishingFailsOrTimesOutBeforeCommit() throws Inter publication.pendingPublications.entrySet().stream().collect(shuffle()).forEach(e -> { if (e.getKey().equals(n2)) { if (timeOut) { - publication.onTimeout(); + publication.cancel("timed out"); } else { e.getValue().onFailure(new TransportException(new Exception("dummy failure"))); } @@ -372,6 +376,22 @@ public void testClusterStatePublishingFailsOrTimesOutBeforeCommit() throws Inter tuple.v1().equals(n2) ? "dummy failure" : "non-failed nodes do not form a quorum"))); } + public void testPublishingToMastersFirst() { + VotingConfiguration singleNodeConfig = new VotingConfiguration(Sets.newHashSet(n1.getId())); + initializeCluster(singleNodeConfig); + + DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder(); + randomNodes(10).forEach(dn -> discoNodesBuilder.add(dn)); + DiscoveryNodes discoveryNodes = discoNodesBuilder.add(n1).localNodeId(n1.getId()).build(); + MockPublication publication = node1.publish(CoordinationStateTests.clusterState(1L, 2L, + discoveryNodes, singleNodeConfig, singleNodeConfig, 42L), null, Collections.emptySet()); + + List publicationTargets = new ArrayList<>(publication.pendingPublications.keySet()); + List sortedPublicationTargets = new ArrayList<>(publicationTargets); + Collections.sort(sortedPublicationTargets, Comparator.comparing(n -> n.isMasterNode() == false)); + assertEquals(sortedPublicationTargets, publicationTargets); + } + public void testClusterStatePublishingTimesOutAfterCommit() throws InterruptedException { VotingConfiguration config = new VotingConfiguration(randomBoolean() ? Sets.newHashSet(n1.getId(), n2.getId()) : Sets.newHashSet(n1.getId(), n2.getId(), n3.getId())); @@ -407,7 +427,7 @@ public void testClusterStatePublishingTimesOutAfterCommit() throws InterruptedEx } }); - publication.onTimeout(); + publication.cancel("timed out"); assertTrue(publication.completed); assertTrue(publication.committed); assertEquals(committingNodes, ackListener.await(0L, TimeUnit.SECONDS)); @@ -428,6 +448,25 @@ public void testClusterStatePublishingTimesOutAfterCommit() throws InterruptedEx assertEquals(discoNodes, ackListener.await(0L, TimeUnit.SECONDS)); } + private static List randomNodes(final int numNodes) { + List nodesList = new ArrayList<>(); + for (int i = 0; i < numNodes; i++) { + Map attributes = new HashMap<>(); + if (frequently()) { + attributes.put("custom", randomBoolean() ? "match" : randomAlphaOfLengthBetween(3, 5)); + } + final DiscoveryNode node = newNode(i, attributes, + new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values())))); + nodesList.add(node); + } + return nodesList; + } + + private static DiscoveryNode newNode(int nodeId, Map attributes, Set roles) { + return new DiscoveryNode("name_" + nodeId, "node_" + nodeId, buildNewFakeTransportAddress(), attributes, roles, + Version.CURRENT); + } + public static Collector> shuffle() { return Collectors.collectingAndThen(Collectors.toList(), ts -> { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java index 5c67e1bbe566c..2f52bd0d40aae 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java @@ -93,25 +93,25 @@ public void testExpression_MultiParts() throws Exception { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37037") public void testExpression_CustomFormat() throws Exception { - List results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{YYYY.MM.dd}}>")); + List results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{yyyy.MM.dd}}>")); assertThat(results.size(), equalTo(1)); assertThat(results.get(0), - equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); + equalTo(".marvel-" + DateTimeFormat.forPattern("yyyy.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); } public void testExpression_EscapeStatic() throws Exception { List result = expressionResolver.resolve(context, Arrays.asList("<.mar\\{v\\}el-{now/d}>")); assertThat(result.size(), equalTo(1)); assertThat(result.get(0), - equalTo(".mar{v}el-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); + equalTo(".mar{v}el-" + DateTimeFormat.forPattern("yyyy.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); } @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37037") public void testExpression_EscapeDateFormat() throws Exception { - List result = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{'\\{year\\}'YYYY}}>")); + List result = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{'\\{year\\}'yyyy}}>")); assertThat(result.size(), equalTo(1)); assertThat(result.get(0), - equalTo(".marvel-" + DateTimeFormat.forPattern("'{year}'YYYY").print(new DateTime(context.getStartTime(), UTC)))); + equalTo(".marvel-" + DateTimeFormat.forPattern("'{year}'yyyy").print(new DateTime(context.getStartTime(), UTC)))); } public void testExpression_MixedArray() throws Exception { @@ -150,10 +150,10 @@ public void testExpression_CustomTimeZoneInIndexName() throws Exception { now = DateTime.now(UTC).withHourOfDay(0).withMinuteOfHour(0).withSecondOfMinute(0); } Context context = new Context(this.context.getState(), this.context.getOptions(), now.getMillis()); - List results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{YYYY.MM.dd|" + timeZone.getID() + "}}>")); + List results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{yyyy.MM.dd|" + timeZone.getID() + "}}>")); assertThat(results.size(), equalTo(1)); logger.info("timezone: [{}], now [{}], name: [{}]", timeZone, now, results.get(0)); - assertThat(results.get(0), equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(now.withZone(timeZone)))); + assertThat(results.get(0), equalTo(".marvel-" + DateTimeFormat.forPattern("yyyy.MM.dd").print(now.withZone(timeZone)))); } public void testExpressionInvalidUnescaped() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index cf0b1eebe983a..5c92029520e8d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -149,6 +150,18 @@ public void testResolveNodesIds() { assertThat(resolvedNodesIds, equalTo(expectedNodesIds)); } + public void testMastersFirst() { + final List inputNodes = randomNodes(10); + final DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + inputNodes.forEach(discoBuilder::add); + final List returnedNodes = discoBuilder.build().mastersFirstStream().collect(Collectors.toList()); + assertEquals(returnedNodes.size(), inputNodes.size()); + assertEquals(new HashSet<>(returnedNodes), new HashSet<>(inputNodes)); + final List sortedNodes = new ArrayList<>(returnedNodes); + Collections.sort(sortedNodes, Comparator.comparing(n -> n.isMasterNode() == false)); + assertEquals(sortedNodes, returnedNodes); + } + public void testDeltas() { Set nodesA = new HashSet<>(); nodesA.addAll(randomNodes(1 + randomInt(10))); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java index b168bae87de41..6897380ee9f01 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java @@ -104,7 +104,7 @@ public void testNoDelayedUnassigned() throws Exception { nodes.add(newNode("node3")); } clusterState = ClusterState.builder(clusterState).nodes(nodes).build(); - clusterState = allocationService.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocationService.disassociateDeadNodes(clusterState, true, "reroute"); ClusterState newState = clusterState; List unassignedShards = newState.getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED); if (nodeAvailableForAllocation) { @@ -153,7 +153,7 @@ public void testDelayedUnassignedScheduleReroute() throws Exception { // remove node that has replica and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(nodeId)).build(); - clusterState = allocationService.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocationService.disassociateDeadNodes(clusterState, true, "reroute"); ClusterState stateWithDelayedShard = clusterState; // make sure the replica is marked as delayed (i.e. not reallocated) assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard)); @@ -264,7 +264,7 @@ public void testDelayedUnassignedScheduleRerouteAfterDelayedReroute() throws Exc .build(); // make sure both replicas are marked as delayed (i.e. not reallocated) allocationService.setNanoTimeOverride(baseTimestampNanos); - clusterState = allocationService.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocationService.disassociateDeadNodes(clusterState, true, "reroute"); final ClusterState stateWithDelayedShards = clusterState; assertEquals(2, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShards)); RoutingNodes.UnassignedShards.UnassignedIterator iter = stateWithDelayedShards.getRoutingNodes().unassigned().iterator(); @@ -401,7 +401,7 @@ public void testDelayedUnassignedScheduleRerouteRescheduledOnShorterDelay() thro // remove node that has replica and reroute clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder(clusterState.nodes()).remove(nodeIdOfFooReplica)).build(); - clusterState = allocationService.deassociateDeadNodes(clusterState, true, "fake node left"); + clusterState = allocationService.disassociateDeadNodes(clusterState, true, "fake node left"); ClusterState stateWithDelayedShard = clusterState; // make sure the replica is marked as delayed (i.e. not reallocated) assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard)); @@ -444,7 +444,7 @@ public void testDelayedUnassignedScheduleRerouteRescheduledOnShorterDelay() thro // remove node that has replica and reroute clusterState = ClusterState.builder(stateWithDelayedShard).nodes( DiscoveryNodes.builder(stateWithDelayedShard.nodes()).remove(nodeIdOfBarReplica)).build(); - ClusterState stateWithShorterDelay = allocationService.deassociateDeadNodes(clusterState, true, "fake node left"); + ClusterState stateWithShorterDelay = allocationService.disassociateDeadNodes(clusterState, true, "fake node left"); delayedAllocationService.setNanoTimeOverride(clusterChangeEventTimestampNanos); delayedAllocationService.clusterChanged( new ClusterChangedEvent("fake node left", stateWithShorterDelay, stateWithDelayedShard)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java deleted file mode 100644 index f9d3e0ab95c01..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.routing; - -import org.elasticsearch.cluster.ESAllocationTestCase; -import org.junit.Before; - -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.hamcrest.Matchers.equalTo; - -public class RoutingServiceTests extends ESAllocationTestCase { - - private TestRoutingService routingService; - - @Before - public void createRoutingService() { - routingService = new TestRoutingService(); - } - - public void testReroute() { - assertThat(routingService.hasReroutedAndClear(), equalTo(false)); - routingService.reroute("test"); - assertThat(routingService.hasReroutedAndClear(), equalTo(true)); - } - - private class TestRoutingService extends RoutingService { - - private AtomicBoolean rerouted = new AtomicBoolean(); - - TestRoutingService() { - super(null, null); - } - - public boolean hasReroutedAndClear() { - return rerouted.getAndSet(false); - } - - @Override - protected void performReroute(String reason) { - logger.info("--> performing fake reroute [{}]", reason); - rerouted.set(true); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 9cf4ef447093d..16cde1a990907 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -241,7 +241,7 @@ public void testNodeLeave() { assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false)); // remove node2 and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); // verify that NODE_LEAVE is the reason for meta assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); @@ -332,7 +332,7 @@ public void testNumberOfDelayedUnassigned() throws Exception { // remove node2 and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); // make sure both replicas are marked as delayed (i.e. not reallocated) - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); assertThat(clusterState.toString(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2)); } @@ -364,7 +364,7 @@ public void testFindNextDelayedAllocation() { final long baseTime = System.nanoTime(); allocation.setNanoTimeOverride(baseTime); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); final long delta = randomBoolean() ? 0 : randomInt((int) expectMinDelaySettingsNanos - 1); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index b3e0146bd7f1d..d2ef2de5458cb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -341,7 +341,7 @@ private ClusterState removeNodes(ClusterState clusterState, AllocationService se } clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); - clusterState = service.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = service.disassociateDeadNodes(clusterState, true, "reroute"); logger.info("start all the primary shards, replicas will start initializing"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index 5c2bfbadd2165..10fa01c2a2b12 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -181,7 +181,7 @@ private ClusterState removeNodes(ClusterState clusterState, AllocationService st clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); if (removed) { - clusterState = strategy.deassociateDeadNodes(clusterState, randomBoolean(), "removed nodes"); + clusterState = strategy.disassociateDeadNodes(clusterState, randomBoolean(), "removed nodes"); } logger.info("start all the primary shards, replicas will start initializing"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index 1ee136b77b0b8..f9dee9807b400 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -84,7 +84,7 @@ public void testSimpleDeadNodeOnStartedPrimaryShard() { .add(newNode(nodeIdRemaining)) ).build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); assertThat(clusterState.getRoutingNodes().node(nodeIdRemaining).iterator().next().primary(), equalTo(true)); assertThat(clusterState.getRoutingNodes().node(nodeIdRemaining).iterator().next().state(), equalTo(STARTED)); @@ -154,7 +154,7 @@ public void testDeadNodeWhileRelocatingOnToNode() { .add(newNode(origPrimaryNodeId)) .add(newNode(origReplicaNodeId)) ).build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(STARTED)); assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED)); @@ -224,7 +224,7 @@ public void testDeadNodeWhileRelocatingOnFromNode() { .add(newNode("node3")) .add(newNode(origReplicaNodeId)) ).build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED)); assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java index fa28b4b1e3482..74e7ac3273634 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java @@ -96,7 +96,7 @@ public void testElectReplicaAsPrimaryDuringRelocation() { indexShardRoutingTable.primaryShard().currentNodeId()); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove(indexShardRoutingTable.primaryShard().currentNodeId())).build(); - clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute"); logger.info("make sure all the primary shards are active"); assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().active(), equalTo(true)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 3e07d732db7b8..71a31b320591a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -111,7 +111,7 @@ public void testSimpleFailedNodeTest() { .remove(clusterState.routingTable().index("test2").shard(0).primaryShard().currentNodeId()) ) .build(); - clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute"); routingNodes = clusterState.getRoutingNodes(); for (RoutingNode routingNode : routingNodes) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java index 8630d2cc5b6d9..5e1ff70d9d68e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java @@ -101,7 +101,7 @@ public void testInSyncAllocationIdsUpdated() { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove("node1")) .build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); // in-sync allocation ids should not be updated assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(3)); @@ -110,7 +110,7 @@ public void testInSyncAllocationIdsUpdated() { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove("node2").remove("node3")) .build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); // in-sync allocation ids should not be updated assertThat(clusterState.getRoutingTable().shardsWithState(UNASSIGNED).size(), equalTo(3)); @@ -145,7 +145,7 @@ public void testInSyncAllocationIdsUpdated() { /** * Assume following scenario: indexing request is written to primary, but fails to be replicated to active replica. * The primary instructs master to fail replica before acknowledging write to client. In the meanwhile, the node of the replica was - * removed from the cluster (deassociateDeadNodes). This means that the ShardRouting of the replica was failed, but it's allocation + * removed from the cluster (disassociateDeadNodes). This means that the ShardRouting of the replica was failed, but it's allocation * id is still part of the in-sync set. We have to make sure that the failShard request from the primary removes the allocation id * from the in-sync set. */ @@ -158,7 +158,7 @@ public void testDeadNodesBeforeReplicaFailed() throws Exception { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove(replicaShard.currentNodeId())) .build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(2)); logger.info("fail replica (for which there is no shard routing in the CS anymore)"); @@ -228,7 +228,7 @@ public void testInSyncIdsNotGrowingWithoutBounds() throws Exception { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove(replicaShard.currentNodeId())) .build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); // in-sync allocation ids should not be updated assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0)); @@ -248,7 +248,7 @@ public void testInSyncIdsNotGrowingWithoutBounds() throws Exception { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove(replicaShard.currentNodeId())) .build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); } // in-sync allocation set is bounded @@ -276,7 +276,7 @@ public void testInSyncIdsNotTrimmedWhenNotGrowing() throws Exception { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove(replicaShard.currentNodeId())) .build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); // in-sync allocation ids should not be updated assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0)); @@ -285,7 +285,7 @@ public void testInSyncIdsNotTrimmedWhenNotGrowing() throws Exception { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove(primaryShard.currentNodeId())) .build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); // in-sync allocation ids should not be updated assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0)); @@ -327,7 +327,7 @@ public void testPrimaryAllocationIdNotRemovedFromInSyncSetWhenNoFailOver() throw clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove(replicaShard.currentNodeId())) .build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); // in-sync allocation ids should not be updated assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index e59b492606a06..f0d25175f6e43 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -388,7 +388,7 @@ public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() { private ClusterState stabilize(ClusterState clusterState, AllocationService service) { logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes()); - clusterState = service.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = service.disassociateDeadNodes(clusterState, true, "reroute"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertRecoveryNodeVersions(routingNodes); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java index b47638884e883..d80feedabf0e6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java @@ -94,7 +94,7 @@ public void testPreferLocalPrimaryAllocationOverFiltered() { .build(); clusterState = ClusterState.builder(clusterState).metaData(metaData) .nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); - clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute"); logger.info("[{}] primaries should be still started but [{}] other primaries should be unassigned", numberOfShards, numberOfShards); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java index d6220ade9ef34..f06a38a2ba462 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java @@ -77,7 +77,7 @@ public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .add(newNode("node3")).remove("node1")).build(); RoutingTable prevRoutingTable = clusterState.routingTable(); - clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute"); routingNodes = clusterState.getRoutingNodes(); routingTable = clusterState.routingTable(); @@ -129,7 +129,7 @@ public void testRemovingInitializingReplicasIfPrimariesFails() { String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(); String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1"; clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode(nodeIdRemaining))).build(); - clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute"); routingNodes = clusterState.getRoutingNodes(); assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index fe7c4a89c9fa2..0c8ebff594598 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -130,7 +130,7 @@ public void testRandomDecisions() { stateBuilder.nodes(newNodesBuilder.build()); clusterState = stateBuilder.build(); if (nodesRemoved) { - clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute"); } else { clusterState = strategy.reroute(clusterState, "reroute"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index deed0b2b2fb72..72364e3fbc925 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -334,7 +334,7 @@ public void testBalanceAllNodesStartedAddIndex() { IndexShardRoutingTable indexShardRoutingTable = clusterState.routingTable().index("test").shard(0); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove(indexShardRoutingTable.primaryShard().currentNodeId())).build(); - clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); + clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute"); routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index c674b8c3a292d..8aab17160fd65 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -117,7 +117,7 @@ public void testSingleIndexStartedShard() { logger.info("Killing node1 where the shard is, checking the shard is unassigned"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); - newState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); + newState = strategy.disassociateDeadNodes(clusterState, true, "reroute"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java index 1bbbb2c8b91a3..a7980b4d0f9e5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java @@ -130,7 +130,7 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { logger.info("Kill node1, backup shard should become primary"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); - newState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); + newState = strategy.disassociateDeadNodes(clusterState, true, "reroute"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index 70b728487ae91..2ca7ba7d1a306 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -111,7 +111,7 @@ public void testFilterInitialRecovery() { // now remove the node of the other copy and fail the current DiscoveryNode node1 = state.nodes().resolveNode("node1"); - state = service.deassociateDeadNodes( + state = service.disassociateDeadNodes( ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).remove("node1")).build(), true, "test"); state = service.applyFailedShard(state, routingTable.index("idx").shard(0).primaryShard(), randomBoolean()); diff --git a/server/src/test/java/org/elasticsearch/common/RoundingTests.java b/server/src/test/java/org/elasticsearch/common/RoundingTests.java index 1664f67a44df9..9bc7c10abd8c8 100644 --- a/server/src/test/java/org/elasticsearch/common/RoundingTests.java +++ b/server/src/test/java/org/elasticsearch/common/RoundingTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -317,7 +318,7 @@ public void testIntervalRounding_HalfDay_DST() { } /** - * randomized test on {@link org.elasticsearch.common.rounding.Rounding.TimeIntervalRounding} with random interval and time zone offsets + * randomized test on {@link org.elasticsearch.common.Rounding.TimeIntervalRounding} with random interval and time zone offsets */ public void testIntervalRoundingRandom() { for (int i = 0; i < 1000; i++) { @@ -728,7 +729,7 @@ private static long time(String time) { } private static long time(String time, ZoneId zone) { - TemporalAccessor accessor = DateFormatters.forPattern("date_optional_time").withZone(zone).parse(time); + TemporalAccessor accessor = DateFormatter.forPattern("date_optional_time").withZone(zone).parse(time); return DateFormatters.toZonedDateTime(accessor).toInstant().toEpochMilli(); } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/StreamTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/StreamTests.java index 53a00111c4289..e2cdaf3c7d5b8 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/StreamTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/StreamTests.java @@ -20,6 +20,8 @@ package org.elasticsearch.common.io.stream; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -39,6 +41,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -293,15 +296,27 @@ public int hashCode() { } - final int length = randomIntBetween(0, 16); - final Collection fooBars = new ArrayList<>(length); + runWriteReadCollectionTest( + () -> new FooBar(randomInt(), randomInt()), StreamOutput::writeCollection, in -> in.readList(FooBar::new)); + } + + public void testStringCollection() throws IOException { + runWriteReadCollectionTest(() -> randomUnicodeOfLength(16), StreamOutput::writeStringCollection, StreamInput::readStringList); + } + + private void runWriteReadCollectionTest( + final Supplier supplier, + final CheckedBiConsumer, IOException> writer, + final CheckedFunction, IOException> reader) throws IOException { + final int length = randomIntBetween(0, 10); + final Collection collection = new ArrayList<>(length); for (int i = 0; i < length; i++) { - fooBars.add(new FooBar(randomInt(), randomInt())); + collection.add(supplier.get()); } try (BytesStreamOutput out = new BytesStreamOutput()) { - out.writeCollection(fooBars); + writer.accept(out, collection); try (StreamInput in = out.bytes().streamInput()) { - assertThat(fooBars, equalTo(in.readList(FooBar::new))); + assertThat(collection, equalTo(reader.apply(in))); } } } diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index b2370dadb604c..c7abea63be081 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -29,13 +29,11 @@ import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import java.time.format.DateTimeParseException; import java.time.temporal.TemporalAccessor; import java.util.Locale; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.startsWith; public class JavaJodaTimeDuellingTests extends ESTestCase { @@ -64,11 +62,22 @@ public void testTimeZoneFormatting() { formatter3.parse("20181126T121212.123-0830"); } - public void testCustomTimeFormats() { - assertSameDate("2010 12 06 11:05:15", "yyyy dd MM HH:mm:ss"); - assertSameDate("12/06", "dd/MM"); - assertSameDate("Nov 24 01:29:01 -0800", "MMM dd HH:mm:ss Z"); - } + // this test requires tests to run with -Djava.locale.providers=COMPAT in order to work +// public void testCustomTimeFormats() { +// assertSameDate("2010 12 06 11:05:15", "yyyy dd MM HH:mm:ss"); +// assertSameDate("12/06", "dd/MM"); +// assertSameDate("Nov 24 01:29:01 -0800", "MMM dd HH:mm:ss Z"); +// +// // also ensure that locale based dates are the same +// assertSameDate("Di., 05 Dez. 2000 02:55:00 -0800", "E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de")); +// assertSameDate("Mi., 06 Dez. 2000 02:55:00 -0800", "E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de")); +// assertSameDate("Do., 07 Dez. 2000 00:00:00 -0800", "E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de")); +// assertSameDate("Fr., 08 Dez. 2000 00:00:00 -0800", "E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de")); +// +// DateTime dateTimeNow = DateTime.now(DateTimeZone.UTC); +// ZonedDateTime javaTimeNow = Instant.ofEpochMilli(dateTimeNow.getMillis()).atZone(ZoneOffset.UTC); +// assertSamePrinterOutput("E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de"), javaTimeNow, dateTimeNow); +// } public void testDuellingFormatsValidParsing() { assertSameDate("1522332219", "epoch_second"); @@ -133,10 +142,6 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-12-31T12:12:12.1", "date_hour_minute_second_millis"); assertSameDate("2018-12-31T12:12:12.1", "date_hour_minute_second_fraction"); - assertSameDate("10000", "date_optional_time"); - assertSameDate("10000T", "date_optional_time"); - assertSameDate("2018", "date_optional_time"); - assertSameDate("2018T", "date_optional_time"); assertSameDate("2018-05", "date_optional_time"); assertSameDate("2018-05-30", "date_optional_time"); assertSameDate("2018-05-30T20", "date_optional_time"); @@ -278,7 +283,7 @@ public void testDuellingFormatsValidParsing() { // joda comes up with a different exception message here, so we have to adapt assertJodaParseException("2012-W1-8", "week_date", "Cannot parse \"2012-W1-8\": Value 8 for dayOfWeek must be in the range [1,7]"); - assertJavaTimeParseException("2012-W1-8", "week_date", "Text '2012-W1-8' could not be parsed"); + assertJavaTimeParseException("2012-W1-8", "week_date"); assertSameDate("2012-W48-6T10:15:30.123Z", "week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+0100", "week_date_time"); @@ -358,6 +363,7 @@ public void testDuelingStrictParsing() { assertParseException("2018-12-1", "strict_date_optional_time"); assertParseException("2018-1-31", "strict_date_optional_time"); assertParseException("10000-01-31", "strict_date_optional_time"); + assertSameDate("2010-01-05T02:00", "strict_date_optional_time"); assertSameDate("2018-12-31T10:15:30", "strict_date_optional_time"); assertSameDate("2018-12-31T10:15:30Z", "strict_date_optional_time"); assertSameDate("2018-12-31T10:15:30+0100", "strict_date_optional_time"); @@ -365,6 +371,7 @@ public void testDuelingStrictParsing() { assertParseException("2018-12-31T10:15:3", "strict_date_optional_time"); assertParseException("2018-12-31T10:5:30", "strict_date_optional_time"); assertParseException("2018-12-31T9:15:30", "strict_date_optional_time"); + assertSameDate("2015-01-04T00:00Z", "strict_date_optional_time"); assertSameDate("2018-12-31T10:15:30.123Z", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.123+0100", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.123+01:00", "strict_date_time"); @@ -456,7 +463,7 @@ public void testDuelingStrictParsing() { // joda comes up with a different exception message here, so we have to adapt assertJodaParseException("2012-W01-8", "strict_week_date", "Cannot parse \"2012-W01-8\": Value 8 for dayOfWeek must be in the range [1,7]"); - assertJavaTimeParseException("2012-W01-8", "strict_week_date", "Text '2012-W01-8' could not be parsed"); + assertJavaTimeParseException("2012-W01-8", "strict_week_date"); assertSameDate("2012-W48-6T10:15:30.123Z", "strict_week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+0100", "strict_week_date_time"); @@ -585,19 +592,55 @@ public void testSamePrinterOutput() { assertSamePrinterOutput("strictYear", javaDate, jodaDate); assertSamePrinterOutput("strictYearMonth", javaDate, jodaDate); assertSamePrinterOutput("strictYearMonthDay", javaDate, jodaDate); + assertSamePrinterOutput("strict_date_optional_time", javaDate, jodaDate); + assertSamePrinterOutput("epoch_millis", javaDate, jodaDate); + } + + public void testSamePrinterOutputWithTimeZone() { + String format = "strict_date_optional_time"; + String dateInput = "2017-02-01T08:02:00.000-01:00"; + DateFormatter javaFormatter = DateFormatter.forPattern(format); + TemporalAccessor javaDate = javaFormatter.parse(dateInput); + + DateFormatter jodaFormatter = Joda.forPattern(format); + DateTime dateTime = jodaFormatter.parseJoda(dateInput); + + String javaDateString = javaFormatter.withZone(ZoneOffset.ofHours(-1)).format(javaDate); + String jodaDateString = jodaFormatter.withZone(ZoneOffset.ofHours(-1)).formatJoda(dateTime); + String message = String.format(Locale.ROOT, "expected string representation to be equal for format [%s]: joda [%s], java [%s]", + format, jodaDateString, javaDateString); + assertThat(message, javaDateString, is(jodaDateString)); + } + + public void testDateFormatterWithLocale() { + Locale locale = randomLocale(random()); + String pattern = randomBoolean() ? "strict_date_optional_time||date_time" : "date_time||strict_date_optional_time"; + DateFormatter formatter = DateFormatter.forPattern(pattern).withLocale(locale); + assertThat(formatter.pattern(), is(pattern)); + assertThat(formatter.locale(), is(locale)); } public void testSeveralTimeFormats() { - DateFormatter jodaFormatter = DateFormatter.forPattern("year_month_day||ordinal_date"); - DateFormatter javaFormatter = DateFormatter.forPattern("8year_month_day||ordinal_date"); - assertSameDate("2018-12-12", "year_month_day||ordinal_date", jodaFormatter, javaFormatter); - assertSameDate("2018-128", "year_month_day||ordinal_date", jodaFormatter, javaFormatter); + { + String format = "year_month_day||ordinal_date"; + DateFormatter jodaFormatter = Joda.forPattern(format); + DateFormatter javaFormatter = DateFormatter.forPattern(format); + assertSameDate("2018-12-12", format, jodaFormatter, javaFormatter); + assertSameDate("2018-128", format, jodaFormatter, javaFormatter); + } + { + String format = "strictDateOptionalTime||dd-MM-yyyy"; + DateFormatter jodaFormatter = Joda.forPattern(format); + DateFormatter javaFormatter = DateFormatter.forPattern(format); + assertSameDate("31-01-2014", format, jodaFormatter, javaFormatter); + } } private void assertSamePrinterOutput(String format, ZonedDateTime javaDate, DateTime jodaDate) { assertThat(jodaDate.getMillis(), is(javaDate.toInstant().toEpochMilli())); - String javaTimeOut = DateFormatters.forPattern(format).format(javaDate); - String jodaTimeOut = DateFormatter.forPattern(format).formatJoda(jodaDate); + String javaTimeOut = DateFormatter.forPattern(format).format(javaDate); + String jodaTimeOut = Joda.forPattern(format).formatJoda(jodaDate); + if (JavaVersion.current().getVersion().get(0) == 8 && javaTimeOut.endsWith(".0") && (format.equals("epoch_second") || format.equals("epoch_millis"))) { // java 8 has a bug in DateTimeFormatter usage when printing dates that rely on isSupportedBy for fields, which is @@ -611,7 +654,7 @@ private void assertSamePrinterOutput(String format, ZonedDateTime javaDate, Date private void assertSameDate(String input, String format) { DateFormatter jodaFormatter = Joda.forPattern(format); - DateFormatter javaFormatter = DateFormatters.forPattern(format); + DateFormatter javaFormatter = DateFormatter.forPattern(format); assertSameDate(input, format, jodaFormatter, javaFormatter); } @@ -629,7 +672,7 @@ private void assertSameDate(String input, String format, DateFormatter jodaForma private void assertParseException(String input, String format) { assertJodaParseException(input, format, "Invalid format: \"" + input); - assertJavaTimeParseException(input, format, "Text '" + input + "' could not be parsed"); + assertJavaTimeParseException(input, format); } private void assertJodaParseException(String input, String format, String expectedMessage) { @@ -638,9 +681,10 @@ private void assertJodaParseException(String input, String format, String expect assertThat(e.getMessage(), containsString(expectedMessage)); } - private void assertJavaTimeParseException(String input, String format, String expectedMessage) { - DateFormatter javaTimeFormatter = DateFormatters.forPattern(format); - DateTimeParseException dateTimeParseException = expectThrows(DateTimeParseException.class, () -> javaTimeFormatter.parse(input)); - assertThat(dateTimeParseException.getMessage(), startsWith(expectedMessage)); + private void assertJavaTimeParseException(String input, String format) { + DateFormatter javaTimeFormatter = DateFormatter.forPattern(format); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> javaTimeFormatter.parse(input)); + assertThat(e.getMessage(), containsString(input)); + assertThat(e.getMessage(), containsString(format)); } } diff --git a/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java b/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java index e502dfc6f963f..19aea3f19ba3b 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.test.ESTestCase; import org.joda.time.DateTimeZone; +import java.time.Instant; import java.time.ZoneId; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.LongSupplier; @@ -35,7 +36,7 @@ public class JodaDateMathParserTests extends ESTestCase { - DateFormatter formatter = DateFormatter.forPattern("dateOptionalTime||epoch_millis"); + DateFormatter formatter = Joda.forPattern("dateOptionalTime||epoch_millis"); DateMathParser parser = formatter.toDateMathParser(); void assertDateMathEquals(String toTest, String expected) { @@ -43,12 +44,12 @@ void assertDateMathEquals(String toTest, String expected) { } void assertDateMathEquals(String toTest, String expected, final long now, boolean roundUp, DateTimeZone timeZone) { - long gotMillis = parser.parse(toTest, () -> now, roundUp, timeZone); + long gotMillis = parser.parse(toTest, () -> now, roundUp, timeZone).toEpochMilli(); assertDateEquals(gotMillis, toTest, expected); } void assertDateEquals(long gotMillis, String original, String expected) { - long expectedMillis = parser.parse(expected, () -> 0); + long expectedMillis = parser.parse(expected, () -> 0).toEpochMilli(); if (gotMillis != expectedMillis) { fail("Date math not equal\n" + "Original : " + original + "\n" + @@ -147,7 +148,7 @@ public void testMultipleAdjustments() { public void testNow() { - final long now = parser.parse("2014-11-18T14:27:32", () -> 0, false, (ZoneId) null); + final long now = parser.parse("2014-11-18T14:27:32", () -> 0, false, (ZoneId) null).toEpochMilli(); assertDateMathEquals("now", "2014-11-18T14:27:32", now, false, null); assertDateMathEquals("now+M", "2014-12-18T14:27:32", now, false, null); @@ -164,10 +165,10 @@ public void testRoundingPreservesEpochAsBaseDate() { DateMathParser parser = formatter.toDateMathParser(); assertEquals( this.formatter.parseMillis("1970-01-01T04:52:20.000Z"), - parser.parse("04:52:20", () -> 0, false, (ZoneId) null)); + parser.parse("04:52:20", () -> 0, false, (ZoneId) null).toEpochMilli()); assertEquals( this.formatter.parseMillis("1970-01-01T04:52:20.999Z"), - parser.parse("04:52:20", () -> 0, true, (ZoneId) null)); + parser.parse("04:52:20", () -> 0, true, (ZoneId) null).toEpochMilli()); } // Implicit rounding happening when parts of the date are not specified @@ -185,9 +186,9 @@ public void testImplicitRounding() { assertDateMathEquals("2014-11-18T09:20", "2014-11-18T08:20:59.999Z", 0, true, DateTimeZone.forID("CET")); // implicit rounding with explicit timezone in the date format - DateFormatter formatter = DateFormatter.forPattern("yyyy-MM-ddZ"); + DateFormatter formatter = Joda.forPattern("yyyy-MM-ddZ"); DateMathParser parser = formatter.toDateMathParser(); - long time = parser.parse("2011-10-09+01:00", () -> 0, false, (ZoneId) null); + Instant time = parser.parse("2011-10-09+01:00", () -> 0, false, (ZoneId) null); assertEquals(this.parser.parse("2011-10-09T00:00:00.000+01:00", () -> 0), time); time = parser.parse("2011-10-09+01:00", () -> 0, true, (ZoneId) null); assertEquals(this.parser.parse("2011-10-09T23:59:59.999+01:00", () -> 0), time); @@ -261,7 +262,7 @@ public void testTimestamps() { // also check other time units JodaDateMathParser parser = new JodaDateMathParser(Joda.forPattern("epoch_second")); - long datetime = parser.parse("1418248078", () -> 0); + long datetime = parser.parse("1418248078", () -> 0).toEpochMilli(); assertDateEquals(datetime, "1418248078", "2014-12-10T21:47:58.000"); // a timestamp before 10000 is a year diff --git a/server/src/test/java/org/elasticsearch/common/joda/JodaTests.java b/server/src/test/java/org/elasticsearch/common/joda/JodaTests.java index fde9d73fae892..003785b3c87b3 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JodaTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JodaTests.java @@ -26,15 +26,18 @@ import java.time.ZoneOffset; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + public class JodaTests extends ESTestCase { public void testBasicTTimePattern() { - DateFormatter formatter1 = DateFormatter.forPattern("basic_t_time"); + DateFormatter formatter1 = Joda.forPattern("basic_t_time"); assertEquals(formatter1.pattern(), "basic_t_time"); assertEquals(formatter1.zone(), ZoneOffset.UTC); - DateFormatter formatter2 = DateFormatter.forPattern("basicTTime"); + DateFormatter formatter2 = Joda.forPattern("basicTTime"); assertEquals(formatter2.pattern(), "basicTTime"); assertEquals(formatter2.zone(), ZoneOffset.UTC); @@ -42,9 +45,25 @@ public void testBasicTTimePattern() { assertEquals("T102030.040Z", formatter1.formatJoda(dt)); assertEquals("T102030.040Z", formatter1.formatJoda(dt)); - expectThrows(IllegalArgumentException.class, () -> DateFormatter.forPattern("basic_t_Time")); - expectThrows(IllegalArgumentException.class, () -> DateFormatter.forPattern("basic_T_Time")); - expectThrows(IllegalArgumentException.class, () -> DateFormatter.forPattern("basic_T_time")); + expectThrows(IllegalArgumentException.class, () -> Joda.forPattern("basic_t_Time")); + expectThrows(IllegalArgumentException.class, () -> Joda.forPattern("basic_T_Time")); + expectThrows(IllegalArgumentException.class, () -> Joda.forPattern("basic_T_time")); } + public void testEqualsAndHashcode() { + String format = randomFrom("yyyy/MM/dd HH:mm:ss", "basic_t_time"); + JodaDateFormatter first = Joda.forPattern(format); + JodaDateFormatter second = Joda.forPattern(format); + JodaDateFormatter third = Joda.forPattern(" HH:mm:ss, yyyy/MM/dd"); + + assertThat(first, is(second)); + assertThat(second, is(first)); + assertThat(first, is(not(third))); + assertThat(second, is(not(third))); + + assertThat(first.hashCode(), is(second.hashCode())); + assertThat(second.hashCode(), is(first.hashCode())); + assertThat(first.hashCode(), is(not(third.hashCode()))); + assertThat(second.hashCode(), is(not(third.hashCode()))); + } } diff --git a/server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java b/server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java deleted file mode 100644 index b6f1b1b650a6f..0000000000000 --- a/server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java +++ /dev/null @@ -1,800 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.joda; - -import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.mapper.RootObjectMapper; -import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.LocalDateTime; -import org.joda.time.MutableDateTime; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.DateTimeFormatterBuilder; -import org.joda.time.format.DateTimeParser; -import org.joda.time.format.ISODateTimeFormat; - -import java.util.Date; -import java.util.Locale; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.endsWith; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -public class SimpleJodaTests extends ESTestCase { - public void testMultiParsers() { - DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); - DateTimeParser[] parsers = new DateTimeParser[3]; - parsers[0] = DateTimeFormat.forPattern("MM/dd/yyyy").withZone(DateTimeZone.UTC).getParser(); - parsers[1] = DateTimeFormat.forPattern("MM-dd-yyyy").withZone(DateTimeZone.UTC).getParser(); - parsers[2] = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss").withZone(DateTimeZone.UTC).getParser(); - builder.append(DateTimeFormat.forPattern("MM/dd/yyyy").withZone(DateTimeZone.UTC).getPrinter(), parsers); - - DateTimeFormatter formatter = builder.toFormatter(); - - formatter.parseMillis("2009-11-15 14:12:12"); - } - - public void testIsoDateFormatDateTimeNoMillisUTC() { - DateTimeFormatter formatter = ISODateTimeFormat.dateTimeNoMillis().withZone(DateTimeZone.UTC); - long millis = formatter.parseMillis("1970-01-01T00:00:00Z"); - - assertThat(millis, equalTo(0L)); - } - - public void testUpperBound() { - MutableDateTime dateTime = new MutableDateTime(3000, 12, 31, 23, 59, 59, 999, DateTimeZone.UTC); - DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC); - - String value = "2000-01-01"; - int i = formatter.parseInto(dateTime, value, 0); - assertThat(i, equalTo(value.length())); - assertThat(dateTime.toString(), equalTo("2000-01-01T23:59:59.999Z")); - } - - public void testIsoDateFormatDateOptionalTimeUTC() { - DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC); - long millis = formatter.parseMillis("1970-01-01T00:00:00Z"); - assertThat(millis, equalTo(0L)); - millis = formatter.parseMillis("1970-01-01T00:00:00.001Z"); - assertThat(millis, equalTo(1L)); - millis = formatter.parseMillis("1970-01-01T00:00:00.1Z"); - assertThat(millis, equalTo(100L)); - millis = formatter.parseMillis("1970-01-01T00:00:00.1"); - assertThat(millis, equalTo(100L)); - millis = formatter.parseMillis("1970-01-01T00:00:00"); - assertThat(millis, equalTo(0L)); - millis = formatter.parseMillis("1970-01-01"); - assertThat(millis, equalTo(0L)); - - millis = formatter.parseMillis("1970"); - assertThat(millis, equalTo(0L)); - - try { - formatter.parseMillis("1970 kuku"); - fail("formatting should fail"); - } catch (IllegalArgumentException e) { - // all is well - } - - // test offset in format - millis = formatter.parseMillis("1970-01-01T00:00:00-02:00"); - assertThat(millis, equalTo(TimeValue.timeValueHours(2).millis())); - } - - public void testIsoVsCustom() { - DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC); - long millis = formatter.parseMillis("1970-01-01T00:00:00"); - assertThat(millis, equalTo(0L)); - - formatter = DateTimeFormat.forPattern("yyyy/MM/dd HH:mm:ss").withZone(DateTimeZone.UTC); - millis = formatter.parseMillis("1970/01/01 00:00:00"); - assertThat(millis, equalTo(0L)); - - DateFormatter formatter2 = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss"); - millis = formatter2.parseMillis("1970/01/01 00:00:00"); - assertThat(millis, equalTo(0L)); - } - - public void testWriteAndParse() { - DateTimeFormatter dateTimeWriter = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC); - DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC); - Date date = new Date(); - assertThat(formatter.parseMillis(dateTimeWriter.print(date.getTime())), equalTo(date.getTime())); - } - - public void testSlashInFormat() { - DateFormatter formatter = DateFormatter.forPattern("MM/yyyy"); - formatter.parseMillis("01/2001"); - - DateFormatter formatter2 = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss"); - long millis = formatter2.parseMillis("1970/01/01 00:00:00"); - formatter2.formatMillis(millis); - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - formatter2.parseMillis("1970/01/01")); - } - - public void testMultipleFormats() { - DateFormatter formatter = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd"); - long millis = formatter.parseMillis("1970/01/01 00:00:00"); - assertThat("1970/01/01 00:00:00", is(formatter.formatMillis(millis))); - } - - public void testMultipleDifferentFormats() { - DateFormatter formatter = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd"); - String input = "1970/01/01 00:00:00"; - long millis = formatter.parseMillis(input); - assertThat(input, is(formatter.formatMillis(millis))); - - DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||dateOptionalTime"); - DateFormatter.forPattern("dateOptionalTime||yyyy/MM/dd HH:mm:ss||yyyy/MM/dd"); - DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||dateOptionalTime||yyyy/MM/dd"); - DateFormatter.forPattern("date_time||date_time_no_millis"); - DateFormatter.forPattern(" date_time || date_time_no_millis"); - } - - public void testInvalidPatterns() { - expectInvalidPattern("does_not_exist_pattern", "Invalid format: [does_not_exist_pattern]: Illegal pattern component: o"); - expectInvalidPattern("OOOOO", "Invalid format: [OOOOO]: Illegal pattern component: OOOOO"); - expectInvalidPattern(null, "No date pattern provided"); - expectInvalidPattern("", "No date pattern provided"); - expectInvalidPattern(" ", "No date pattern provided"); - expectInvalidPattern("||date_time_no_millis", "No date pattern provided"); - expectInvalidPattern("date_time_no_millis||", "No date pattern provided"); - } - - private void expectInvalidPattern(String pattern, String errorMessage) { - try { - DateFormatter.forPattern(pattern); - fail("Pattern " + pattern + " should have thrown an exception but did not"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString(errorMessage)); - } - } - - public void testRounding() { - long TIME = utcTimeInMillis("2009-02-03T01:01:01"); - MutableDateTime time = new MutableDateTime(DateTimeZone.UTC); - time.setMillis(TIME); - assertThat(time.monthOfYear().roundFloor().toString(), equalTo("2009-02-01T00:00:00.000Z")); - time.setMillis(TIME); - assertThat(time.hourOfDay().roundFloor().toString(), equalTo("2009-02-03T01:00:00.000Z")); - time.setMillis(TIME); - assertThat(time.dayOfMonth().roundFloor().toString(), equalTo("2009-02-03T00:00:00.000Z")); - } - - public void testRoundingSetOnTime() { - MutableDateTime time = new MutableDateTime(DateTimeZone.UTC); - time.setRounding(time.getChronology().monthOfYear(), MutableDateTime.ROUND_FLOOR); - time.setMillis(utcTimeInMillis("2009-02-03T01:01:01")); - assertThat(time.toString(), equalTo("2009-02-01T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-01T00:00:00.000Z"))); - - time.setMillis(utcTimeInMillis("2009-05-03T01:01:01")); - assertThat(time.toString(), equalTo("2009-05-01T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-05-01T00:00:00.000Z"))); - - time = new MutableDateTime(DateTimeZone.UTC); - time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR); - time.setMillis(utcTimeInMillis("2009-02-03T01:01:01")); - assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-03T00:00:00.000Z"))); - - time.setMillis(utcTimeInMillis("2009-02-02T23:01:01")); - assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-02T00:00:00.000Z"))); - - time = new MutableDateTime(DateTimeZone.UTC); - time.setRounding(time.getChronology().weekOfWeekyear(), MutableDateTime.ROUND_FLOOR); - time.setMillis(utcTimeInMillis("2011-05-05T01:01:01")); - assertThat(time.toString(), equalTo("2011-05-02T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTimeInMillis("2011-05-02T00:00:00.000Z"))); - } - - public void testRoundingWithTimeZone() { - MutableDateTime time = new MutableDateTime(DateTimeZone.UTC); - time.setZone(DateTimeZone.forOffsetHours(-2)); - time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR); - - MutableDateTime utcTime = new MutableDateTime(DateTimeZone.UTC); - utcTime.setRounding(utcTime.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR); - - time.setMillis(utcTimeInMillis("2009-02-03T01:01:01")); - utcTime.setMillis(utcTimeInMillis("2009-02-03T01:01:01")); - - assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000-02:00")); - assertThat(utcTime.toString(), equalTo("2009-02-03T00:00:00.000Z")); - // the time is on the 2nd, and utcTime is on the 3rd, but, because time already encapsulates - // time zone, the millis diff is not 24, but 22 hours - assertThat(time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis())); - - time.setMillis(utcTimeInMillis("2009-02-04T01:01:01")); - utcTime.setMillis(utcTimeInMillis("2009-02-04T01:01:01")); - assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000-02:00")); - assertThat(utcTime.toString(), equalTo("2009-02-04T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis())); - } - - public void testThatEpochsCanBeParsed() { - boolean parseMilliSeconds = randomBoolean(); - - // epoch: 1433144433655 => date: Mon Jun 1 09:40:33.655 CEST 2015 - DateFormatter formatter = DateFormatter.forPattern(parseMilliSeconds ? "epoch_millis" : "epoch_second"); - DateTime dateTime = formatter.parseJoda(parseMilliSeconds ? "1433144433655" : "1433144433"); - - assertThat(dateTime.getYear(), is(2015)); - assertThat(dateTime.getDayOfMonth(), is(1)); - assertThat(dateTime.getMonthOfYear(), is(6)); - assertThat(dateTime.getHourOfDay(), is(7)); // utc timezone, +2 offset due to CEST - assertThat(dateTime.getMinuteOfHour(), is(40)); - assertThat(dateTime.getSecondOfMinute(), is(33)); - - if (parseMilliSeconds) { - assertThat(dateTime.getMillisOfSecond(), is(655)); - } else { - assertThat(dateTime.getMillisOfSecond(), is(0)); - } - - // test floats get truncated - String epochFloatValue = String.format(Locale.US, "%d.%d", dateTime.getMillis() / (parseMilliSeconds ? 1L : 1000L), - randomNonNegativeLong()); - assertThat(formatter.parseJoda(epochFloatValue).getMillis(), is(dateTime.getMillis())); - } - - public void testThatNegativeEpochsCanBeParsed() { - // problem: negative epochs can be arbitrary in size... - boolean parseMilliSeconds = randomBoolean(); - DateFormatter formatter = DateFormatter.forPattern(parseMilliSeconds ? "epoch_millis" : "epoch_second"); - DateTime dateTime = formatter.parseJoda("-10000"); - - assertThat(dateTime.getYear(), is(1969)); - assertThat(dateTime.getMonthOfYear(), is(12)); - assertThat(dateTime.getDayOfMonth(), is(31)); - if (parseMilliSeconds) { - assertThat(dateTime.getHourOfDay(), is(23)); // utc timezone, +2 offset due to CEST - assertThat(dateTime.getMinuteOfHour(), is(59)); - assertThat(dateTime.getSecondOfMinute(), is(50)); - } else { - assertThat(dateTime.getHourOfDay(), is(21)); // utc timezone, +2 offset due to CEST - assertThat(dateTime.getMinuteOfHour(), is(13)); - assertThat(dateTime.getSecondOfMinute(), is(20)); - } - - // test floats get truncated - String epochFloatValue = String.format(Locale.US, "%d.%d", dateTime.getMillis() / (parseMilliSeconds ? 1L : 1000L), - randomNonNegativeLong()); - assertThat(formatter.parseJoda(epochFloatValue).getMillis(), is(dateTime.getMillis())); - - // every negative epoch must be parsed, no matter if exact the size or bigger - if (parseMilliSeconds) { - formatter.parseJoda("-100000000"); - formatter.parseJoda("-999999999999"); - formatter.parseJoda("-1234567890123"); - formatter.parseJoda("-1234567890123456789"); - - formatter.parseJoda("-1234567890123.9999"); - formatter.parseJoda("-1234567890123456789.9999"); - } else { - formatter.parseJoda("-100000000"); - formatter.parseJoda("-1234567890"); - formatter.parseJoda("-1234567890123456"); - - formatter.parseJoda("-1234567890.9999"); - formatter.parseJoda("-1234567890123456.9999"); - } - - assertWarnings("Use of negative values" + - " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); - } - - public void testForInvalidDatesInEpochSecond() { - DateFormatter formatter = DateFormatter.forPattern("epoch_second"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - formatter.parseJoda(randomFrom("invalid date", "12345678901234567", "12345678901234567890"))); - assertThat(e.getMessage(), containsString("Invalid format")); - } - - public void testForInvalidDatesInEpochMillis() { - DateFormatter formatter = DateFormatter.forPattern("epoch_millis"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - formatter.parseJoda(randomFrom("invalid date", "12345678901234567890"))); - assertThat(e.getMessage(), containsString("Invalid format")); - } - - public void testForInvalidTimeZoneWithEpochSeconds() { - DateTimeFormatter dateTimeFormatter = new DateTimeFormatterBuilder() - .append(new Joda.EpochTimeParser(false)) - .toFormatter() - .withZone(DateTimeZone.forOffsetHours(1)) - .withLocale(Locale.ROOT); - DateFormatter formatter = - new JodaDateFormatter("epoch_seconds", dateTimeFormatter, dateTimeFormatter); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - formatter.parseJoda("1433144433655")); - assertThat(e.getMessage(), containsString("time_zone must be UTC")); - } - - public void testForInvalidTimeZoneWithEpochMillis() { - DateTimeFormatter dateTimeFormatter = new DateTimeFormatterBuilder() - .append(new Joda.EpochTimeParser(true)) - .toFormatter() - .withZone(DateTimeZone.forOffsetHours(1)) - .withLocale(Locale.ROOT); - DateFormatter formatter = - new JodaDateFormatter("epoch_millis", dateTimeFormatter, dateTimeFormatter); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - formatter.parseJoda("1433144433")); - assertThat(e.getMessage(), containsString("time_zone must be UTC")); - } - - public void testThatEpochParserIsPrinter() { - JodaDateFormatter formatter = Joda.forPattern("epoch_millis"); - assertThat(formatter.parser.isPrinter(), is(true)); - assertThat(formatter.printer.isPrinter(), is(true)); - - JodaDateFormatter epochSecondFormatter = Joda.forPattern("epoch_second"); - assertThat(epochSecondFormatter.parser.isPrinter(), is(true)); - assertThat(epochSecondFormatter.printer.isPrinter(), is(true)); - } - - public void testThatEpochTimePrinterWorks() { - StringBuffer buffer = new StringBuffer(); - LocalDateTime now = LocalDateTime.now(); - - Joda.EpochTimePrinter epochTimePrinter = new Joda.EpochTimePrinter(false); - epochTimePrinter.printTo(buffer, now, Locale.ROOT); - assertThat(buffer.length(), is(10)); - // only check the last digit, as seconds go from 0-99 in the unix timestamp and don't stop at 60 - assertThat(buffer.toString(), endsWith(String.valueOf(now.getSecondOfMinute() % 10))); - - buffer = new StringBuffer(); - Joda.EpochTimePrinter epochMilliSecondTimePrinter = new Joda.EpochTimePrinter(true); - epochMilliSecondTimePrinter.printTo(buffer, now, Locale.ROOT); - assertThat(buffer.length(), is(13)); - assertThat(buffer.toString(), endsWith(String.valueOf(now.getMillisOfSecond()))); - } - - public void testThatEpochParserIsIdempotent() { - DateFormatter formatter = DateFormatter.forPattern("epoch_millis"); - DateTime dateTime = formatter.parseJoda("1234567890123"); - assertThat(dateTime.getMillis(), is(1234567890123L)); - dateTime = formatter.parseJoda("1234567890456"); - assertThat(dateTime.getMillis(), is(1234567890456L)); - dateTime = formatter.parseJoda("1234567890789"); - assertThat(dateTime.getMillis(), is(1234567890789L)); - dateTime = formatter.parseJoda("1234567890123456789"); - assertThat(dateTime.getMillis(), is(1234567890123456789L)); - - DateFormatter secondsFormatter = DateFormatter.forPattern("epoch_second"); - DateTime secondsDateTime = secondsFormatter.parseJoda("1234567890"); - assertThat(secondsDateTime.getMillis(), is(1234567890000L)); - secondsDateTime = secondsFormatter.parseJoda("1234567890"); - assertThat(secondsDateTime.getMillis(), is(1234567890000L)); - secondsDateTime = secondsFormatter.parseJoda("1234567890"); - assertThat(secondsDateTime.getMillis(), is(1234567890000L)); - secondsDateTime = secondsFormatter.parseJoda("1234567890123456"); - assertThat(secondsDateTime.getMillis(), is(1234567890123456000L)); - } - - public void testThatDefaultFormatterChecksForCorrectYearLength() throws Exception { - // if no strict version is tested, this means the date format is already strict by itself - // yyyyMMdd - assertValidDateFormatParsing("basicDate", "20140303"); - assertDateFormatParsingThrowingException("basicDate", "2010303"); - - // yyyyMMdd’T'HHmmss.SSSZ - assertValidDateFormatParsing("basicDateTime", "20140303T124343.123Z"); - assertValidDateFormatParsing("basicDateTime", "00050303T124343.123Z"); - assertDateFormatParsingThrowingException("basicDateTime", "50303T124343.123Z"); - - // yyyyMMdd’T'HHmmssZ - assertValidDateFormatParsing("basicDateTimeNoMillis", "20140303T124343Z"); - assertValidDateFormatParsing("basicDateTimeNoMillis", "00050303T124343Z"); - assertDateFormatParsingThrowingException("basicDateTimeNoMillis", "50303T124343Z"); - - // yyyyDDD - assertValidDateFormatParsing("basicOrdinalDate", "0005165"); - assertDateFormatParsingThrowingException("basicOrdinalDate", "5165"); - - // yyyyDDD’T'HHmmss.SSSZ - assertValidDateFormatParsing("basicOrdinalDateTime", "0005165T124343.123Z"); - assertValidDateFormatParsing("basicOrdinalDateTime", "0005165T124343.123Z"); - assertDateFormatParsingThrowingException("basicOrdinalDateTime", "5165T124343.123Z"); - - // yyyyDDD’T'HHmmssZ - assertValidDateFormatParsing("basicOrdinalDateTimeNoMillis", "0005165T124343Z"); - assertValidDateFormatParsing("basicOrdinalDateTimeNoMillis", "0005165T124343Z"); - assertDateFormatParsingThrowingException("basicOrdinalDateTimeNoMillis", "5165T124343Z"); - - // HHmmss.SSSZ - assertValidDateFormatParsing("basicTime", "090909.123Z"); - assertDateFormatParsingThrowingException("basicTime", "90909.123Z"); - - // HHmmssZ - assertValidDateFormatParsing("basicTimeNoMillis", "090909Z"); - assertDateFormatParsingThrowingException("basicTimeNoMillis", "90909Z"); - - // 'T’HHmmss.SSSZ - assertValidDateFormatParsing("basicTTime", "T090909.123Z"); - assertDateFormatParsingThrowingException("basicTTime", "T90909.123Z"); - - // T’HHmmssZ - assertValidDateFormatParsing("basicTTimeNoMillis", "T090909Z"); - assertDateFormatParsingThrowingException("basicTTimeNoMillis", "T90909Z"); - - // xxxx’W'wwe - assertValidDateFormatParsing("basicWeekDate", "0005W414"); - assertValidDateFormatParsing("basicWeekDate", "5W414", "0005W414"); - assertDateFormatParsingThrowingException("basicWeekDate", "5W14"); - - assertValidDateFormatParsing("strictBasicWeekDate", "0005W414"); - assertDateFormatParsingThrowingException("strictBasicWeekDate", "0005W47"); - assertDateFormatParsingThrowingException("strictBasicWeekDate", "5W414"); - assertDateFormatParsingThrowingException("strictBasicWeekDate", "5W14"); - - // xxxx’W'wwe’T'HHmmss.SSSZ - assertValidDateFormatParsing("basicWeekDateTime", "0005W414T124343.123Z"); - assertValidDateFormatParsing("basicWeekDateTime", "5W414T124343.123Z", "0005W414T124343.123Z"); - assertDateFormatParsingThrowingException("basicWeekDateTime", "5W14T124343.123Z"); - - assertValidDateFormatParsing("strictBasicWeekDateTime", "0005W414T124343.123Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTime", "0005W47T124343.123Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTime", "5W414T124343.123Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTime", "5W14T124343.123Z"); - - // xxxx’W'wwe’T'HHmmssZ - assertValidDateFormatParsing("basicWeekDateTimeNoMillis", "0005W414T124343Z"); - assertValidDateFormatParsing("basicWeekDateTimeNoMillis", "5W414T124343Z", "0005W414T124343Z"); - assertDateFormatParsingThrowingException("basicWeekDateTimeNoMillis", "5W14T124343Z"); - - assertValidDateFormatParsing("strictBasicWeekDateTimeNoMillis", "0005W414T124343Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTimeNoMillis", "0005W47T124343Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTimeNoMillis", "5W414T124343Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTimeNoMillis", "5W14T124343Z"); - - // yyyy-MM-dd - assertValidDateFormatParsing("date", "0005-06-03"); - assertValidDateFormatParsing("date", "5-6-3", "0005-06-03"); - - assertValidDateFormatParsing("strictDate", "0005-06-03"); - assertDateFormatParsingThrowingException("strictDate", "5-6-3"); - assertDateFormatParsingThrowingException("strictDate", "0005-06-3"); - assertDateFormatParsingThrowingException("strictDate", "0005-6-03"); - assertDateFormatParsingThrowingException("strictDate", "5-06-03"); - - // yyyy-MM-dd'T'HH - assertValidDateFormatParsing("dateHour", "0005-06-03T12"); - assertValidDateFormatParsing("dateHour", "5-6-3T1", "0005-06-03T01"); - - assertValidDateFormatParsing("strictDateHour", "0005-06-03T12"); - assertDateFormatParsingThrowingException("strictDateHour", "5-6-3T1"); - - // yyyy-MM-dd'T'HH:mm - assertValidDateFormatParsing("dateHourMinute", "0005-06-03T12:12"); - assertValidDateFormatParsing("dateHourMinute", "5-6-3T12:1", "0005-06-03T12:01"); - - assertValidDateFormatParsing("strictDateHourMinute", "0005-06-03T12:12"); - assertDateFormatParsingThrowingException("strictDateHourMinute", "5-6-3T12:1"); - - // yyyy-MM-dd'T'HH:mm:ss - assertValidDateFormatParsing("dateHourMinuteSecond", "0005-06-03T12:12:12"); - assertValidDateFormatParsing("dateHourMinuteSecond", "5-6-3T12:12:1", "0005-06-03T12:12:01"); - - assertValidDateFormatParsing("strictDateHourMinuteSecond", "0005-06-03T12:12:12"); - assertDateFormatParsingThrowingException("strictDateHourMinuteSecond", "5-6-3T12:12:1"); - - // yyyy-MM-dd’T'HH:mm:ss.SSS - assertValidDateFormatParsing("dateHourMinuteSecondFraction", "0005-06-03T12:12:12.123"); - assertValidDateFormatParsing("dateHourMinuteSecondFraction", "5-6-3T12:12:1.123", "0005-06-03T12:12:01.123"); - assertValidDateFormatParsing("dateHourMinuteSecondFraction", "5-6-3T12:12:1.1", "0005-06-03T12:12:01.100"); - - assertValidDateFormatParsing("strictDateHourMinuteSecondFraction", "0005-06-03T12:12:12.123"); - assertDateFormatParsingThrowingException("strictDateHourMinuteSecondFraction", "5-6-3T12:12:12.1"); - assertDateFormatParsingThrowingException("strictDateHourMinuteSecondFraction", "5-6-3T12:12:12.12"); - - assertValidDateFormatParsing("dateHourMinuteSecondMillis", "0005-06-03T12:12:12.123"); - assertValidDateFormatParsing("dateHourMinuteSecondMillis", "5-6-3T12:12:1.123", "0005-06-03T12:12:01.123"); - assertValidDateFormatParsing("dateHourMinuteSecondMillis", "5-6-3T12:12:1.1", "0005-06-03T12:12:01.100"); - - assertValidDateFormatParsing("strictDateHourMinuteSecondMillis", "0005-06-03T12:12:12.123"); - assertDateFormatParsingThrowingException("strictDateHourMinuteSecondMillis", "5-6-3T12:12:12.1"); - assertDateFormatParsingThrowingException("strictDateHourMinuteSecondMillis", "5-6-3T12:12:12.12"); - - // yyyy-MM-dd'T'HH:mm:ss.SSSZ - assertValidDateFormatParsing("dateOptionalTime", "2014-03-03", "2014-03-03T00:00:00.000Z"); - assertValidDateFormatParsing("dateOptionalTime", "1257-3-03", "1257-03-03T00:00:00.000Z"); - assertValidDateFormatParsing("dateOptionalTime", "0005-03-3", "0005-03-03T00:00:00.000Z"); - assertValidDateFormatParsing("dateOptionalTime", "5-03-03", "0005-03-03T00:00:00.000Z"); - assertValidDateFormatParsing("dateOptionalTime", "5-03-03T1:1:1.1", "0005-03-03T01:01:01.100Z"); - assertValidDateFormatParsing("strictDateOptionalTime", "2014-03-03", "2014-03-03T00:00:00.000Z"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "0005-3-03"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "0005-03-3"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03T1:1:1.1"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03T01:01:01.1"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03T01:01:1.100"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03T01:1:01.100"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03T1:01:01.100"); - - // yyyy-MM-dd’T'HH:mm:ss.SSSZZ - assertValidDateFormatParsing("dateTime", "5-03-03T1:1:1.1Z", "0005-03-03T01:01:01.100Z"); - assertValidDateFormatParsing("strictDateTime", "2014-03-03T11:11:11.100Z", "2014-03-03T11:11:11.100Z"); - assertDateFormatParsingThrowingException("strictDateTime", "0005-03-03T1:1:1.1Z"); - assertDateFormatParsingThrowingException("strictDateTime", "0005-03-03T01:01:1.100Z"); - assertDateFormatParsingThrowingException("strictDateTime", "0005-03-03T01:1:01.100Z"); - assertDateFormatParsingThrowingException("strictDateTime", "0005-03-03T1:01:01.100Z"); - - // yyyy-MM-dd’T'HH:mm:ssZZ - assertValidDateFormatParsing("dateTimeNoMillis", "5-03-03T1:1:1Z", "0005-03-03T01:01:01Z"); - assertValidDateFormatParsing("strictDateTimeNoMillis", "2014-03-03T11:11:11Z", "2014-03-03T11:11:11Z"); - assertDateFormatParsingThrowingException("strictDateTimeNoMillis", "0005-03-03T1:1:1Z"); - assertDateFormatParsingThrowingException("strictDateTimeNoMillis", "0005-03-03T01:01:1Z"); - assertDateFormatParsingThrowingException("strictDateTimeNoMillis", "0005-03-03T01:1:01Z"); - assertDateFormatParsingThrowingException("strictDateTimeNoMillis", "0005-03-03T1:01:01Z"); - - // HH - assertValidDateFormatParsing("hour", "12"); - assertValidDateFormatParsing("hour", "1", "01"); - assertValidDateFormatParsing("strictHour", "12"); - assertValidDateFormatParsing("strictHour", "01"); - assertDateFormatParsingThrowingException("strictHour", "1"); - - // HH:mm - assertValidDateFormatParsing("hourMinute", "12:12"); - assertValidDateFormatParsing("hourMinute", "12:1", "12:01"); - assertValidDateFormatParsing("strictHourMinute", "12:12"); - assertValidDateFormatParsing("strictHourMinute", "12:01"); - assertDateFormatParsingThrowingException("strictHourMinute", "12:1"); - - // HH:mm:ss - assertValidDateFormatParsing("hourMinuteSecond", "12:12:12"); - assertValidDateFormatParsing("hourMinuteSecond", "12:12:1", "12:12:01"); - assertValidDateFormatParsing("strictHourMinuteSecond", "12:12:12"); - assertValidDateFormatParsing("strictHourMinuteSecond", "12:12:01"); - assertDateFormatParsingThrowingException("strictHourMinuteSecond", "12:12:1"); - - // HH:mm:ss.SSS - assertValidDateFormatParsing("hourMinuteSecondFraction", "12:12:12.123"); - assertValidDateFormatParsing("hourMinuteSecondFraction", "12:12:12.1", "12:12:12.100"); - assertValidDateFormatParsing("strictHourMinuteSecondFraction", "12:12:12.123"); - assertValidDateFormatParsing("strictHourMinuteSecondFraction", "12:12:12.1", "12:12:12.100"); - - assertValidDateFormatParsing("hourMinuteSecondMillis", "12:12:12.123"); - assertValidDateFormatParsing("hourMinuteSecondMillis", "12:12:12.1", "12:12:12.100"); - assertValidDateFormatParsing("strictHourMinuteSecondMillis", "12:12:12.123"); - assertValidDateFormatParsing("strictHourMinuteSecondMillis", "12:12:12.1", "12:12:12.100"); - - // yyyy-DDD - assertValidDateFormatParsing("ordinalDate", "5-3", "0005-003"); - assertValidDateFormatParsing("strictOrdinalDate", "0005-003"); - assertDateFormatParsingThrowingException("strictOrdinalDate", "5-3"); - assertDateFormatParsingThrowingException("strictOrdinalDate", "0005-3"); - assertDateFormatParsingThrowingException("strictOrdinalDate", "5-003"); - - // yyyy-DDD’T'HH:mm:ss.SSSZZ - assertValidDateFormatParsing("ordinalDateTime", "5-3T12:12:12.100Z", "0005-003T12:12:12.100Z"); - assertValidDateFormatParsing("strictOrdinalDateTime", "0005-003T12:12:12.100Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTime", "5-3T1:12:12.123Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTime", "5-3T12:1:12.123Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTime", "5-3T12:12:1.123Z"); - - // yyyy-DDD’T'HH:mm:ssZZ - assertValidDateFormatParsing("ordinalDateTimeNoMillis", "5-3T12:12:12Z", "0005-003T12:12:12Z"); - assertValidDateFormatParsing("strictOrdinalDateTimeNoMillis", "0005-003T12:12:12Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTimeNoMillis", "5-3T1:12:12Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTimeNoMillis", "5-3T12:1:12Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTimeNoMillis", "5-3T12:12:1Z"); - - - // HH:mm:ss.SSSZZ - assertValidDateFormatParsing("time", "12:12:12.100Z"); - assertValidDateFormatParsing("time", "01:01:01.1Z", "01:01:01.100Z"); - assertValidDateFormatParsing("time", "1:1:1.1Z", "01:01:01.100Z"); - assertValidDateFormatParsing("strictTime", "12:12:12.100Z"); - assertDateFormatParsingThrowingException("strictTime", "12:12:1.100Z"); - assertDateFormatParsingThrowingException("strictTime", "12:1:12.100Z"); - assertDateFormatParsingThrowingException("strictTime", "1:12:12.100Z"); - - // HH:mm:ssZZ - assertValidDateFormatParsing("timeNoMillis", "12:12:12Z"); - assertValidDateFormatParsing("timeNoMillis", "01:01:01Z", "01:01:01Z"); - assertValidDateFormatParsing("timeNoMillis", "1:1:1Z", "01:01:01Z"); - assertValidDateFormatParsing("strictTimeNoMillis", "12:12:12Z"); - assertDateFormatParsingThrowingException("strictTimeNoMillis", "12:12:1Z"); - assertDateFormatParsingThrowingException("strictTimeNoMillis", "12:1:12Z"); - assertDateFormatParsingThrowingException("strictTimeNoMillis", "1:12:12Z"); - - // 'T’HH:mm:ss.SSSZZ - assertValidDateFormatParsing("tTime", "T12:12:12.100Z"); - assertValidDateFormatParsing("tTime", "T01:01:01.1Z", "T01:01:01.100Z"); - assertValidDateFormatParsing("tTime", "T1:1:1.1Z", "T01:01:01.100Z"); - assertValidDateFormatParsing("strictTTime", "T12:12:12.100Z"); - assertDateFormatParsingThrowingException("strictTTime", "T12:12:1.100Z"); - assertDateFormatParsingThrowingException("strictTTime", "T12:1:12.100Z"); - assertDateFormatParsingThrowingException("strictTTime", "T1:12:12.100Z"); - - // 'T’HH:mm:ssZZ - assertValidDateFormatParsing("tTimeNoMillis", "T12:12:12Z"); - assertValidDateFormatParsing("tTimeNoMillis", "T01:01:01Z", "T01:01:01Z"); - assertValidDateFormatParsing("tTimeNoMillis", "T1:1:1Z", "T01:01:01Z"); - assertValidDateFormatParsing("strictTTimeNoMillis", "T12:12:12Z"); - assertDateFormatParsingThrowingException("strictTTimeNoMillis", "T12:12:1Z"); - assertDateFormatParsingThrowingException("strictTTimeNoMillis", "T12:1:12Z"); - assertDateFormatParsingThrowingException("strictTTimeNoMillis", "T1:12:12Z"); - - // xxxx-'W’ww-e - assertValidDateFormatParsing("weekDate", "0005-W4-1", "0005-W04-1"); - assertValidDateFormatParsing("strictWeekDate", "0005-W04-1"); - assertDateFormatParsingThrowingException("strictWeekDate", "0005-W4-1"); - - // xxxx-'W’ww-e’T'HH:mm:ss.SSSZZ - assertValidDateFormatParsing("weekDateTime", "0005-W41-4T12:43:43.123Z"); - assertValidDateFormatParsing("weekDateTime", "5-W41-4T12:43:43.123Z", "0005-W41-4T12:43:43.123Z"); - assertValidDateFormatParsing("strictWeekDateTime", "0005-W41-4T12:43:43.123Z"); - assertValidDateFormatParsing("strictWeekDateTime", "0005-W06-4T12:43:43.123Z"); - assertDateFormatParsingThrowingException("strictWeekDateTime", "0005-W4-7T12:43:43.123Z"); - assertDateFormatParsingThrowingException("strictWeekDateTime", "5-W41-4T12:43:43.123Z"); - assertDateFormatParsingThrowingException("strictWeekDateTime", "5-W1-4T12:43:43.123Z"); - - // xxxx-'W’ww-e’T'HH:mm:ssZZ - assertValidDateFormatParsing("weekDateTimeNoMillis", "0005-W41-4T12:43:43Z"); - assertValidDateFormatParsing("weekDateTimeNoMillis", "5-W41-4T12:43:43Z", "0005-W41-4T12:43:43Z"); - assertValidDateFormatParsing("strictWeekDateTimeNoMillis", "0005-W41-4T12:43:43Z"); - assertValidDateFormatParsing("strictWeekDateTimeNoMillis", "0005-W06-4T12:43:43Z"); - assertDateFormatParsingThrowingException("strictWeekDateTimeNoMillis", "0005-W4-7T12:43:43Z"); - assertDateFormatParsingThrowingException("strictWeekDateTimeNoMillis", "5-W41-4T12:43:43Z"); - assertDateFormatParsingThrowingException("strictWeekDateTimeNoMillis", "5-W1-4T12:43:43Z"); - - // yyyy - assertValidDateFormatParsing("weekyear", "2014"); - assertValidDateFormatParsing("weekyear", "5", "0005"); - assertValidDateFormatParsing("weekyear", "0005"); - assertValidDateFormatParsing("strictWeekyear", "2014"); - assertValidDateFormatParsing("strictWeekyear", "0005"); - assertDateFormatParsingThrowingException("strictWeekyear", "5"); - - // yyyy-'W'ee - assertValidDateFormatParsing("weekyearWeek", "2014-W41"); - assertValidDateFormatParsing("weekyearWeek", "2014-W1", "2014-W01"); - assertValidDateFormatParsing("strictWeekyearWeek", "2014-W41"); - assertDateFormatParsingThrowingException("strictWeekyearWeek", "2014-W1"); - - // weekyearWeekDay - assertValidDateFormatParsing("weekyearWeekDay", "2014-W41-1"); - assertValidDateFormatParsing("weekyearWeekDay", "2014-W1-1", "2014-W01-1"); - assertValidDateFormatParsing("strictWeekyearWeekDay", "2014-W41-1"); - assertDateFormatParsingThrowingException("strictWeekyearWeekDay", "2014-W1-1"); - - // yyyy - assertValidDateFormatParsing("year", "2014"); - assertValidDateFormatParsing("year", "5", "0005"); - assertValidDateFormatParsing("strictYear", "2014"); - assertDateFormatParsingThrowingException("strictYear", "5"); - - // yyyy-mm - assertValidDateFormatParsing("yearMonth", "2014-12"); - assertValidDateFormatParsing("yearMonth", "2014-5", "2014-05"); - assertValidDateFormatParsing("strictYearMonth", "2014-12"); - assertDateFormatParsingThrowingException("strictYearMonth", "2014-5"); - - // yyyy-mm-dd - assertValidDateFormatParsing("yearMonthDay", "2014-12-12"); - assertValidDateFormatParsing("yearMonthDay", "2014-05-5", "2014-05-05"); - assertValidDateFormatParsing("strictYearMonthDay", "2014-12-12"); - assertDateFormatParsingThrowingException("strictYearMonthDay", "2014-05-5"); - } - - public void testThatRootObjectParsingIsStrict() throws Exception { - String[] datesThatWork = new String[] { "2014/10/10", "2014/10/10 12:12:12", "2014-05-05", "2014-05-05T12:12:12.123Z" }; - String[] datesThatShouldNotWork = new String[]{ "5-05-05", "2014-5-05", "2014-05-5", - "2014-05-05T1:12:12.123Z", "2014-05-05T12:1:12.123Z", "2014-05-05T12:12:1.123Z", - "4/10/10", "2014/1/10", "2014/10/1", - "2014/10/10 1:12:12", "2014/10/10 12:1:12", "2014/10/10 12:12:1" - }; - - // good case - for (String date : datesThatWork) { - boolean dateParsingSuccessful = false; - for (DateFormatter dateTimeFormatter : RootObjectMapper.Defaults.DYNAMIC_DATE_TIME_FORMATTERS) { - try { - dateTimeFormatter.parseMillis(date); - dateParsingSuccessful = true; - break; - } catch (Exception e) {} - } - if (!dateParsingSuccessful) { - fail("Parsing for date " + date + " in root object mapper failed, but shouldnt"); - } - } - - // bad case - for (String date : datesThatShouldNotWork) { - for (DateFormatter dateTimeFormatter : RootObjectMapper.Defaults.DYNAMIC_DATE_TIME_FORMATTERS) { - try { - dateTimeFormatter.parseMillis(date); - fail(String.format(Locale.ROOT, "Expected exception when parsing date %s in root mapper", date)); - } catch (Exception e) {} - } - } - } - - public void testDeprecatedFormatSpecifiers() { - Joda.forPattern("CC"); - assertWarnings("Use of 'C' (century-of-era) is deprecated and will not be supported in the" + - " next major version of Elasticsearch."); - Joda.forPattern("YYYY"); - assertWarnings("Use of 'Y' (year-of-era) will change to 'y' in the" + - " next major version of Elasticsearch. Prefix your date format with '8' to use the new specifier."); - Joda.forPattern("xxxx"); - assertWarnings("Use of 'x' (week-based-year) will change" + - " to 'Y' in the next major version of Elasticsearch. Prefix your date format with '8' to use the new specifier."); - // multiple deprecations - Joda.forPattern("CC-YYYY"); - assertWarnings("Use of 'C' (century-of-era) is deprecated and will not be supported in the" + - " next major version of Elasticsearch.", "Use of 'Y' (year-of-era) will change to 'y' in the" + - " next major version of Elasticsearch. Prefix your date format with '8' to use the new specifier."); - } - - public void testDeprecatedEpochScientificNotation() { - assertValidDateFormatParsing("epoch_second", "1.234e5", "123400"); - assertWarnings("Use of scientific notation" + - " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); - assertValidDateFormatParsing("epoch_millis", "1.234e5", "123400"); - assertWarnings("Use of scientific notation" + - " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); - } - - public void testDeprecatedEpochNegative() { - assertValidDateFormatParsing("epoch_second", "-12345", "-12345"); - assertWarnings("Use of negative values" + - " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); - assertValidDateFormatParsing("epoch_millis", "-12345", "-12345"); - assertWarnings("Use of negative values" + - " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); - } - - private void assertValidDateFormatParsing(String pattern, String dateToParse) { - assertValidDateFormatParsing(pattern, dateToParse, dateToParse); - } - - private void assertValidDateFormatParsing(String pattern, String dateToParse, String expectedDate) { - DateFormatter formatter = DateFormatter.forPattern(pattern); - assertThat(formatter.formatMillis(formatter.parseMillis(dateToParse)), is(expectedDate)); - } - - private void assertDateFormatParsingThrowingException(String pattern, String invalidDate) { - try { - DateFormatter formatter = DateFormatter.forPattern(pattern); - formatter.parseMillis(invalidDate); - fail(String.format(Locale.ROOT, "Expected parsing exception for pattern [%s] with date [%s], but did not happen", - pattern, invalidDate)); - } catch (IllegalArgumentException e) { - } - } - - private long utcTimeInMillis(String time) { - return ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC).parseMillis(time); - } - -} diff --git a/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java b/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java index 7e3dbdd5b94df..3ee4ce0e7d7bf 100644 --- a/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java +++ b/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java @@ -19,9 +19,11 @@ package org.elasticsearch.common.rounding; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.time.ZoneOffset; @@ -42,6 +44,7 @@ public void testSerialization() throws Exception { rounding = org.elasticsearch.common.Rounding.builder(timeValue()).timeZone(ZoneOffset.UTC).build(); } BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(VersionUtils.getPreviousVersion(Version.V_7_0_0)); rounding.writeTo(output); Rounding roundingJoda = Rounding.Streams.read(output.bytes().streamInput()); diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index fc732fbd88e2e..efb46db59de96 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -316,6 +316,91 @@ public void testTupleAffixUpdateConsumer() { assertEquals(0, results.size()); } + public void testAffixGroupUpdateConsumer() { + String prefix = randomAlphaOfLength(3) + "foo."; + String intSuffix = randomAlphaOfLength(3); + String listSuffix = randomAlphaOfLength(4); + Setting.AffixSetting intSetting = Setting.affixKeySetting(prefix, intSuffix, + (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); + Setting.AffixSetting> listSetting = Setting.affixKeySetting(prefix, listSuffix, + (k) -> Setting.listSetting(k, Arrays.asList("1"), Integer::parseInt, Property.Dynamic, Property.NodeScope)); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY,new HashSet<>(Arrays.asList(intSetting, listSetting))); + Map results = new HashMap<>(); + Function listBuilder = g -> (prefix + g + "." + listSuffix); + Function intBuilder = g -> (prefix + g + "." + intSuffix); + String group1 = randomAlphaOfLength(3); + String group2 = randomAlphaOfLength(4); + String group3 = randomAlphaOfLength(5); + BiConsumer listConsumer = results::put; + + service.addAffixGroupUpdateConsumer(Arrays.asList(intSetting, listSetting), listConsumer); + assertEquals(0, results.size()); + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group2), "18", "19", "20") + .build()); + Settings groupOneSettings = results.get(group1); + Settings groupTwoSettings = results.get(group2); + assertEquals(2, intSetting.getConcreteSettingForNamespace(group1).get(groupOneSettings).intValue()); + assertEquals(7, intSetting.getConcreteSettingForNamespace(group2).get(groupTwoSettings).intValue()); + assertEquals(Arrays.asList(16, 17), listSetting.getConcreteSettingForNamespace(group1).get(groupOneSettings)); + assertEquals(Arrays.asList(18, 19, 20), listSetting.getConcreteSettingForNamespace(group2).get(groupTwoSettings)); + assertEquals(2, groupOneSettings.size()); + assertEquals(2, groupTwoSettings.size()); + assertEquals(2, results.size()); + + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putNull(listBuilder.apply(group2)) // removed + .build()); + + assertNull(group1 + " wasn't changed", results.get(group1)); + groupTwoSettings = results.get(group2); + assertEquals(7, intSetting.getConcreteSettingForNamespace(group2).get(groupTwoSettings).intValue()); + assertEquals(Arrays.asList(1), listSetting.getConcreteSettingForNamespace(group2).get(groupTwoSettings)); + assertEquals(1, results.size()); + assertEquals(2, groupTwoSettings.size()); + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group3), "5", "6") // added + .build()); + assertNull(group1 + " wasn't changed", results.get(group1)); + assertNull(group2 + " wasn't changed", results.get(group2)); + + Settings groupThreeSettings = results.get(group3); + assertEquals(1, intSetting.getConcreteSettingForNamespace(group3).get(groupThreeSettings).intValue()); + assertEquals(Arrays.asList(5, 6), listSetting.getConcreteSettingForNamespace(group3).get(groupThreeSettings)); + assertEquals(1, results.size()); + assertEquals(1, groupThreeSettings.size()); + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 4) // modified + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group3), "5", "6") + .build()); + assertNull(group2 + " wasn't changed", results.get(group2)); + assertNull(group3 + " wasn't changed", results.get(group3)); + + groupOneSettings = results.get(group1); + assertEquals(4, intSetting.getConcreteSettingForNamespace(group1).get(groupOneSettings).intValue()); + assertEquals(Arrays.asList(16, 17), listSetting.getConcreteSettingForNamespace(group1).get(groupOneSettings)); + assertEquals(1, results.size()); + assertEquals(2, groupOneSettings.size()); + results.clear(); + } + public void testAddConsumerAffix() { Setting.AffixSetting intSetting = Setting.affixKeySetting("foo.", "bar", (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index a2858284593d1..96ef39e430178 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -24,7 +24,8 @@ import java.time.Instant; import java.time.ZoneId; import java.time.ZonedDateTime; -import java.time.format.DateTimeParseException; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoField; import java.time.temporal.TemporalAccessor; import java.util.Locale; @@ -55,13 +56,13 @@ public void testEpochMillisParser() { } } - public void testEpochMilliParser() { + public void testInvalidEpochMilliParser() { DateFormatter formatter = DateFormatters.forPattern("epoch_millis"); - DateTimeParseException e = expectThrows(DateTimeParseException.class, () -> formatter.parse("invalid")); - assertThat(e.getMessage(), containsString("could not be parsed")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("invalid")); + assertThat(e.getMessage(), containsString("failed to parse date field [invalid] with format [epoch_millis]")); - e = expectThrows(DateTimeParseException.class, () -> formatter.parse("123.1234567")); - assertThat(e.getMessage(), containsString("unparsed text found at index 3")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("123.1234567")); + assertThat(e.getMessage(), containsString("failed to parse date field [123.1234567] with format [epoch_millis]")); } // this is not in the duelling tests, because the epoch second parser in joda time drops the milliseconds after the comma @@ -70,14 +71,14 @@ public void testEpochMilliParser() { public void testEpochSecondParser() { DateFormatter formatter = DateFormatters.forPattern("epoch_second"); - DateTimeParseException e = expectThrows(DateTimeParseException.class, () -> formatter.parse("1234.1")); - assertThat(e.getMessage(), is("Text '1234.1' could not be parsed, unparsed text found at index 4")); - e = expectThrows(DateTimeParseException.class, () -> formatter.parse("1234.")); - assertThat(e.getMessage(), is("Text '1234.' could not be parsed, unparsed text found at index 4")); - e = expectThrows(DateTimeParseException.class, () -> formatter.parse("abc")); - assertThat(e.getMessage(), is("Text 'abc' could not be parsed, unparsed text found at index 0")); - e = expectThrows(DateTimeParseException.class, () -> formatter.parse("1234.abc")); - assertThat(e.getMessage(), is("Text '1234.abc' could not be parsed, unparsed text found at index 4")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("1234.1234567890")); + assertThat(e.getMessage(), is("failed to parse date field [1234.1234567890] with format [epoch_second]")); + e = expectThrows(IllegalArgumentException .class, () -> formatter.parse("1234.123456789013221")); + assertThat(e.getMessage(), containsString("[1234.123456789013221]")); + e = expectThrows(IllegalArgumentException .class, () -> formatter.parse("abc")); + assertThat(e.getMessage(), containsString("[abc]")); + e = expectThrows(IllegalArgumentException .class, () -> formatter.parse("1234.abc")); + assertThat(e.getMessage(), containsString("[1234.abc]")); } public void testEpochMilliParsersWithDifferentFormatters() { @@ -137,7 +138,7 @@ public void testEqualsAndHashcode() { assertThat(epochMillisFormatter, equalTo(DateFormatters.forPattern("epoch_millis"))); } - public void testForceJava8() { + public void testSupportBackwardsJava8Format() { assertThat(DateFormatter.forPattern("8yyyy-MM-dd"), instanceOf(JavaDateFormatter.class)); // named formats too assertThat(DateFormatter.forPattern("8date_optional_time"), instanceOf(JavaDateFormatter.class)); @@ -157,4 +158,58 @@ public void testParsingStrictNanoDates() { formatter.format(formatter.parse("2018-05-15T17:14:56.123456789+0100")); formatter.format(formatter.parse("2018-05-15T17:14:56.123456789+01:00")); } + + public void testRoundupFormatterWithEpochDates() { + assertRoundupFormatter("epoch_millis", "1234567890", 1234567890L); + // also check nanos of the epoch_millis formatter if it is rounded up to the nano second + DateTimeFormatter roundUpFormatter = ((JavaDateFormatter) DateFormatter.forPattern("8epoch_millis")).getRoundupParser(); + Instant epochMilliInstant = DateFormatters.toZonedDateTime(roundUpFormatter.parse("1234567890")).toInstant(); + assertThat(epochMilliInstant.getLong(ChronoField.NANO_OF_SECOND), is(890_999_999L)); + + assertRoundupFormatter("strict_date_optional_time||epoch_millis", "2018-10-10T12:13:14.123Z", 1539173594123L); + assertRoundupFormatter("strict_date_optional_time||epoch_millis", "1234567890", 1234567890L); + assertRoundupFormatter("strict_date_optional_time||epoch_millis", "2018-10-10", 1539215999999L); + assertRoundupFormatter("uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_millis", "2018-10-10T12:13:14.123", 1539173594123L); + assertRoundupFormatter("uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_millis", "1234567890", 1234567890L); + + assertRoundupFormatter("epoch_second", "1234567890", 1234567890999L); + // also check nanos of the epoch_millis formatter if it is rounded up to the nano second + DateTimeFormatter epochSecondRoundupParser = ((JavaDateFormatter) DateFormatter.forPattern("8epoch_second")).getRoundupParser(); + Instant epochSecondInstant = DateFormatters.toZonedDateTime(epochSecondRoundupParser.parse("1234567890")).toInstant(); + assertThat(epochSecondInstant.getLong(ChronoField.NANO_OF_SECOND), is(999_999_999L)); + + assertRoundupFormatter("strict_date_optional_time||epoch_second", "2018-10-10T12:13:14.123Z", 1539173594123L); + assertRoundupFormatter("strict_date_optional_time||epoch_second", "1234567890", 1234567890999L); + assertRoundupFormatter("strict_date_optional_time||epoch_second", "2018-10-10", 1539215999999L); + assertRoundupFormatter("uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_second", "2018-10-10T12:13:14.123", 1539173594123L); + assertRoundupFormatter("uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_second", "1234567890", 1234567890999L); + } + + private void assertRoundupFormatter(String format, String input, long expectedMilliSeconds) { + JavaDateFormatter dateFormatter = (JavaDateFormatter) DateFormatter.forPattern(format); + dateFormatter.parse(input); + DateTimeFormatter roundUpFormatter = dateFormatter.getRoundupParser(); + long millis = DateFormatters.toZonedDateTime(roundUpFormatter.parse(input)).toInstant().toEpochMilli(); + assertThat(millis, is(expectedMilliSeconds)); + } + + public void testRoundupFormatterZone() { + ZoneId zoneId = randomZone(); + String format = randomFrom("epoch_second", "epoch_millis", "strict_date_optional_time", "uuuu-MM-dd'T'HH:mm:ss.SSS", + "strict_date_optional_time||date_optional_time"); + JavaDateFormatter formatter = (JavaDateFormatter) DateFormatter.forPattern(format).withZone(zoneId); + DateTimeFormatter roundUpFormatter = formatter.getRoundupParser(); + assertThat(roundUpFormatter.getZone(), is(zoneId)); + assertThat(formatter.zone(), is(zoneId)); + } + + public void testRoundupFormatterLocale() { + Locale locale = randomLocale(random()); + String format = randomFrom("epoch_second", "epoch_millis", "strict_date_optional_time", "uuuu-MM-dd'T'HH:mm:ss.SSS", + "strict_date_optional_time||date_optional_time"); + JavaDateFormatter formatter = (JavaDateFormatter) DateFormatter.forPattern(format).withLocale(locale); + DateTimeFormatter roundupParser = formatter.getRoundupParser(); + assertThat(roundupParser.getLocale(), is(locale)); + assertThat(formatter.locale(), is(locale)); + } } diff --git a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java index 8d702ebee8388..2b8d89bc68bae 100644 --- a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java @@ -39,8 +39,6 @@ public class JavaDateMathParserTests extends ESTestCase { private final DateMathParser parser = formatter.toDateMathParser(); public void testBasicDates() { - assertDateMathEquals("2014", "2014-01-01T00:00:00.000"); - assertDateMathEquals("2014-05", "2014-05-01T00:00:00.000"); assertDateMathEquals("2014-05-30", "2014-05-30T00:00:00.000"); assertDateMathEquals("2014-05-30T20", "2014-05-30T20:00:00.000"); assertDateMathEquals("2014-05-30T20:21", "2014-05-30T20:21:00.000"); @@ -125,7 +123,7 @@ public void testMultipleAdjustments() { } public void testNow() { - final long now = parser.parse("2014-11-18T14:27:32", () -> 0, false, (ZoneId) null); + final long now = parser.parse("2014-11-18T14:27:32", () -> 0, false, (ZoneId) null).toEpochMilli(); assertDateMathEquals("now", "2014-11-18T14:27:32", now, false, null); assertDateMathEquals("now+M", "2014-12-18T14:27:32", now, false, null); @@ -142,11 +140,11 @@ public void testRoundingPreservesEpochAsBaseDate() { DateMathParser parser = formatter.toDateMathParser(); ZonedDateTime zonedDateTime = DateFormatters.toZonedDateTime(formatter.parse("04:52:20")); assertThat(zonedDateTime.getYear(), is(1970)); - long millisStart = zonedDateTime.toInstant().toEpochMilli(); + Instant millisStart = zonedDateTime.toInstant(); assertEquals(millisStart, parser.parse("04:52:20", () -> 0, false, (ZoneId) null)); // due to rounding up, we have to add the number of milliseconds here manually long millisEnd = DateFormatters.toZonedDateTime(formatter.parse("04:52:20")).toInstant().toEpochMilli() + 999; - assertEquals(millisEnd, parser.parse("04:52:20", () -> 0, true, (ZoneId) null)); + assertEquals(millisEnd, parser.parse("04:52:20", () -> 0, true, (ZoneId) null).toEpochMilli()); } // Implicit rounding happening when parts of the date are not specified @@ -166,9 +164,10 @@ public void testImplicitRounding() { // implicit rounding with explicit timezone in the date format DateFormatter formatter = DateFormatters.forPattern("yyyy-MM-ddXXX"); DateMathParser parser = formatter.toDateMathParser(); - long time = parser.parse("2011-10-09+01:00", () -> 0, false, (ZoneId) null); + Instant time = parser.parse("2011-10-09+01:00", () -> 0, false, (ZoneId) null); assertEquals(this.parser.parse("2011-10-09T00:00:00.000+01:00", () -> 0), time); - time = parser.parse("2011-10-09+01:00", () -> 0, true, (ZoneId) null); + time = DateFormatter.forPattern("strict_date_optional_time_nanos").toDateMathParser() + .parse("2011-10-09T23:59:59.999+01:00", () -> 0, false, (ZoneId) null); assertEquals(this.parser.parse("2011-10-09T23:59:59.999+01:00", () -> 0), time); } @@ -176,7 +175,6 @@ public void testImplicitRounding() { public void testExplicitRounding() { assertDateMathEquals("2014-11-18||/y", "2014-01-01", 0, false, null); assertDateMathEquals("2014-11-18||/y", "2014-12-31T23:59:59.999", 0, true, null); - assertDateMathEquals("2014||/y", "2014-01-01", 0, false, null); assertDateMathEquals("2014-01-01T00:00:00.001||/y", "2014-12-31T23:59:59.999", 0, true, null); // rounding should also take into account time zone assertDateMathEquals("2014-11-18||/y", "2013-12-31T23:00:00.000Z", 0, false, ZoneId.of("CET")); @@ -239,16 +237,16 @@ public void testTimestamps() { assertDateMathEquals("1418248078000||/m", "2014-12-10T21:47:00.000"); // also check other time units - DateMathParser parser = DateFormatter.forPattern("8epoch_second||dateOptionalTime").toDateMathParser(); - long datetime = parser.parse("1418248078", () -> 0); + DateMathParser parser = DateFormatter.forPattern("epoch_second||dateOptionalTime").toDateMathParser(); + long datetime = parser.parse("1418248078", () -> 0).toEpochMilli(); assertDateEquals(datetime, "1418248078", "2014-12-10T21:47:58.000"); // a timestamp before 10000 is a year - assertDateMathEquals("9999", "9999-01-01T00:00:00.000"); + assertDateMathEquals("9999", "1970-01-01T00:00:09.999Z"); // 10000 is also a year, breaking bwc, used to be a timestamp - assertDateMathEquals("10000", "10000-01-01T00:00:00.000"); + assertDateMathEquals("10000", "1970-01-01T00:00:10.000Z"); // but 10000 with T is still a date format - assertDateMathEquals("10000T", "10000-01-01T00:00:00.000"); + assertDateMathEquals("10000-01-01T", "10000-01-01T00:00:00.000"); } void assertParseException(String msg, String date, String exc) { @@ -266,7 +264,7 @@ public void testIllegalMathFormat() { public void testIllegalDateFormat() { assertParseException("Expected bad timestamp exception", Long.toString(Long.MAX_VALUE) + "0", "failed to parse date field"); - assertParseException("Expected bad date format exception", "123bogus", "Unrecognized chars at the end of [123bogus]"); + assertParseException("Expected bad date format exception", "123bogus", "failed to parse date field [123bogus]"); } public void testOnlyCallsNowIfNecessary() { @@ -286,12 +284,12 @@ private void assertDateMathEquals(String toTest, String expected) { } private void assertDateMathEquals(String toTest, String expected, final long now, boolean roundUp, ZoneId timeZone) { - long gotMillis = parser.parse(toTest, () -> now, roundUp, timeZone); + long gotMillis = parser.parse(toTest, () -> now, roundUp, timeZone).toEpochMilli(); assertDateEquals(gotMillis, toTest, expected); } private void assertDateEquals(long gotMillis, String original, String expected) { - long expectedMillis = parser.parse(expected, () -> 0); + long expectedMillis = parser.parse(expected, () -> 0).toEpochMilli(); if (gotMillis != expectedMillis) { ZonedDateTime zonedDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(gotMillis), ZoneOffset.UTC); fail("Date math not equal\n" + diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 6f47590089f28..efe69ff99e20b 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -95,6 +95,7 @@ public void testIsolatedUnicastNodes() throws Exception { * The temporal unicast responses is empty. When partition is solved the one ping response contains a master node. * The rejoining node should take this master node and connect. */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37687") public void testUnicastSinglePingResponseContainsMaster() throws Exception { internalCluster().setHostsListContainsOnlyFirstNode(true); List nodes = startCluster(4); @@ -131,6 +132,7 @@ public void testUnicastSinglePingResponseContainsMaster() throws Exception { /** * Test cluster join with issues in cluster state publishing * */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37685") public void testClusterJoinDespiteOfPublishingIssues() throws Exception { String masterNode = internalCluster().startMasterOnlyNode(); String nonMasterNode = internalCluster().startDataOnlyNode(); @@ -200,6 +202,7 @@ public void testClusterFormingWithASlowNode() throws Exception { ensureStableCluster(3); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37539") public void testElectMasterWithLatestVersion() throws Exception { final Set nodes = new HashSet<>(internalCluster().startNodes(3)); ensureStableCluster(3); diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 9051f302a591f..e237415a9c60e 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -53,6 +54,7 @@ import java.util.function.Supplier; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class DiscoveryModuleTests extends ESTestCase { @@ -87,11 +89,12 @@ default Map> getDiscoveryTypes(ThreadPool threadPool @Before public void setupDummyServices() { - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, null, null); + threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); masterService = mock(MasterService.class); namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); clusterApplier = mock(ClusterApplier.class); - threadPool = mock(ThreadPool.class); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); gatewayMetaState = mock(GatewayMetaState.class); } diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java index cc9295cee2e3f..b04a1cebbf3e3 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java @@ -62,6 +62,7 @@ public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool(FileBasedUnicastHostsProviderTests.class.getName()); executorService = Executors.newSingleThreadExecutor(); + createTransportSvc(); } @After @@ -77,8 +78,7 @@ public void tearDown() throws Exception { } } - @Before - public void createTransportSvc() { + private void createTransportSvc() { final MockNioTransport transport = new MockNioTransport(Settings.EMPTY, Version.CURRENT, threadPool, new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java new file mode 100644 index 0000000000000..bdca86858701f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.env; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class NodeEnvironmentIT extends ESIntegTestCase { + public void testStartFailureOnDataForNonDataNode() throws Exception { + final String indexName = "test-fail-on-data"; + + logger.info("--> starting one node"); + internalCluster().startNode(); + + logger.info("--> creating index"); + prepareCreate(indexName, Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + ).get(); + final String indexUUID = resolveIndex(indexName).getUUID(); + + logger.info("--> indexing a simple document"); + client().prepareIndex(indexName, "type1", "1").setSource("field1", "value1").get(); + + logger.info("--> restarting the node with node.data=true"); + internalCluster().restartRandomDataNode(); + + logger.info("--> restarting the node with node.data=false"); + IllegalStateException ex = expectThrows(IllegalStateException.class, + "Node started with node.data=false and existing shard data must fail", + () -> + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).build(); + } + })); + assertThat(ex.getMessage(), containsString(indexUUID)); + assertThat(ex.getMessage(), + startsWith("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data")); + } +} diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 55ab02c1dcfb9..2771bc9f243ac 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -52,6 +53,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; @LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to allow extras public class NodeEnvironmentTests extends ESTestCase { @@ -470,6 +472,47 @@ public void testExistingTempFiles() throws IOException { } } + public void testEnsureNoShardData() throws IOException { + Settings settings = buildEnvSettings(Settings.EMPTY); + Index index = new Index("test", "testUUID"); + + try (NodeEnvironment env = newNodeEnvironment(settings)) { + for (Path path : env.indexPaths(index)) { + Files.createDirectories(path.resolve(MetaDataStateFormat.STATE_DIR_NAME)); + } + } + + // build settings using same path.data as original but with node.data=false + Settings noDataSettings = Settings.builder() + .put(settings) + .put(Node.NODE_DATA_SETTING.getKey(), false).build(); + + String shardDataDirName = Integer.toString(randomInt(10)); + Path shardPath; + + // test that we can create data=false env with only meta information + try (NodeEnvironment env = newNodeEnvironment(noDataSettings)) { + for (Path path : env.indexPaths(index)) { + Files.createDirectories(path.resolve(shardDataDirName)); + } + shardPath = env.indexPaths(index)[0]; + } + + IllegalStateException ex = expectThrows(IllegalStateException.class, + "Must fail creating NodeEnvironment on a data path that has shard data if node.data=false", + () -> newNodeEnvironment(noDataSettings).close()); + + assertThat(ex.getMessage(), + containsString(shardPath.resolve(shardDataDirName).toAbsolutePath().toString())); + assertThat(ex.getMessage(), + startsWith("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data")); + + // test that we can create data=true env + newNodeEnvironment(settings).close(); + } + /** Converts an array of Strings to an array of Paths, adding an additional child if specified */ private Path[] stringsToPaths(String[] strings, String additional) { Path[] locations = new Path[strings.length]; diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 7a6b751de74f9..22259b919ec6f 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -412,6 +412,12 @@ public void testAtomicityWithFailures() throws IOException { } catch (WriteStateException e) { if (e.isDirty()) { possibleMetaData.add(metaData); + /* + * If dirty WriteStateException occurred, it's only safe to proceed if there is subsequent + * successful write of metadata and Manifest. We prefer to break here, not to over complicate test logic. + * See also MetaDataStateFormat#testFailRandomlyAndReadAnyState, that does not break. + */ + break; } } } diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index 2522452f72774..40f3bd8a01623 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -408,6 +408,12 @@ public void testFailRandomlyAndReadAnyState() throws IOException { Path[] randomPaths = randomSubsetOf(randomIntBetween(1, paths.length), paths).toArray(new Path[0]); DummyState stateOnDisk = format.loadLatestState(logger, NamedXContentRegistry.EMPTY, randomPaths); assertTrue(possibleStates.contains(stateOnDisk)); + if (possibleStates.size() > 1) { + //if there was a WriteStateException we need to override current state before we continue + newState = writeAndReadStateSuccessfully(format, paths); + possibleStates.clear(); + possibleStates.add(newState); + } } writeAndReadStateSuccessfully(format, paths); diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java index e5cdcc1ac7320..b345afe9b8f89 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -70,7 +70,7 @@ public void testReadOnlyEngine() throws Exception { lastDocIds = getDocIds(engine, true); assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); - assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); + assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); for (int i = 0; i < numDocs; i++) { if (randomBoolean()) { String delId = Integer.toString(i); @@ -126,7 +126,7 @@ public void testFlushes() throws IOException { if (rarely()) { engine.flush(); } - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + globalCheckpoint.set(i); } engine.syncTranslog(); engine.flushAndClose(); @@ -139,6 +139,40 @@ public void testFlushes() throws IOException { } } + public void testEnsureMaxSeqNoIsEqualToGlobalCheckpoint() throws IOException { + IOUtils.close(engine, store); + Engine readOnlyEngine = null; + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + final int numDocs = scaledRandomIntBetween(10, 100); + try (InternalEngine engine = createEngine(config)) { + long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + for (int i = 0; i < numDocs; i++) { + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, + System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); + maxSeqNo = engine.getLocalCheckpoint(); + } + globalCheckpoint.set(engine.getLocalCheckpoint() - 1); + engine.syncTranslog(); + engine.flushAndClose(); + + IllegalStateException exception = expectThrows(IllegalStateException.class, + () -> new ReadOnlyEngine(engine.engineConfig, null, null, true, Function.identity()) { + @Override + protected void assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) { + // we don't want the assertion to trip in this test + } + }); + assertThat(exception.getMessage(), equalTo("Maximum sequence number [" + maxSeqNo + + "] from last commit does not match global checkpoint [" + globalCheckpoint.get() + "]")); + } finally { + IOUtils.close(readOnlyEngine); + } + } + } + public void testReadOnly() throws IOException { IOUtils.close(engine, store); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index 8b437d25a8495..38b3d5a2f1ff2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -21,21 +21,23 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; import org.junit.Before; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.Collection; import static org.hamcrest.Matchers.containsString; @@ -173,7 +175,8 @@ public void testIgnoreMalformed() throws Exception { .endObject()), XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); - assertThat(e.getCause().getMessage(), containsString("Cannot parse \"2016-03-99\"")); + assertThat(e.getCause().getMessage(), + containsString("failed to parse date field [2016-03-99] with format [strict_date_optional_time||epoch_millis]")); mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "date") @@ -217,36 +220,13 @@ public void testChangeFormat() throws IOException { assertEquals(1457654400000L, pointField.numericValue().longValue()); } - public void testFloatEpochFormat() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "date") - .field("format", "epoch_millis").endObject().endObject() - .endObject().endObject()); - - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - - assertEquals(mapping, mapper.mappingSource().toString()); - - long epochMillis = randomNonNegativeLong(); - String epochFloatValue = epochMillis + "." + randomIntBetween(0, 999); - - ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", epochFloatValue) - .endObject()), - XContentType.JSON)); - - IndexableField[] fields = doc.rootDoc().getFields("field"); - assertEquals(2, fields.length); - IndexableField pointField = fields[0]; - assertEquals(epochMillis, pointField.numericValue().longValue()); - } - public void testChangeLocale() throws IOException { + assumeTrue("need java 9 for testing ",JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "date").field("locale", "fr").endObject().endObject() - .endObject().endObject()); + .startObject("properties").startObject("field").field("type", "date") + .field("format", "E, d MMM yyyy HH:mm:ss Z") + .field("locale", "de") + .endObject().endObject().endObject().endObject()); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); @@ -255,7 +235,7 @@ public void testChangeLocale() throws IOException { mapper.parse(new SourceToParse("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() .startObject() - .field("field", 1457654400) + .field("field", "Mi., 06 Dez. 2000 02:55:00 -0800") .endObject()), XContentType.JSON)); } @@ -340,12 +320,8 @@ public void testEmptyName() throws IOException { assertThat(e.getMessage(), containsString("name cannot be empty string")); } - /** - * Test that time zones are correctly parsed by the {@link DateFieldMapper}. - * There is a known bug with Joda 2.9.4 reported in https://github.com/JodaOrg/joda-time/issues/373. - */ public void testTimeZoneParsing() throws Exception { - final String timeZonePattern = "yyyy-MM-dd" + randomFrom("ZZZ", "[ZZZ]", "'['ZZZ']'"); + final String timeZonePattern = "yyyy-MM-dd" + randomFrom("XXX", "[XXX]", "'['XXX']'"); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() .startObject("type") @@ -360,20 +336,22 @@ public void testTimeZoneParsing() throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - final DateTimeZone randomTimeZone = randomBoolean() ? DateTimeZone.forID(randomFrom("UTC", "CET")) : randomDateTimeZone(); - final DateTime randomDate = new DateTime(2016, 03, 11, 0, 0, 0, randomTimeZone); + DateFormatter formatter = DateFormatter.forPattern(timeZonePattern); + final ZoneId randomTimeZone = randomBoolean() ? ZoneId.of(randomFrom("UTC", "CET")) : randomZone(); + final ZonedDateTime randomDate = ZonedDateTime.of(2016, 3, 11, 0, 0, 0, 0, randomTimeZone); ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() .startObject() - .field("field", DateTimeFormat.forPattern(timeZonePattern).print(randomDate)) + .field("field", formatter.format(randomDate)) .endObject()), XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); - assertEquals(randomDate.withZone(DateTimeZone.UTC).getMillis(), fields[0].numericValue().longValue()); + long millis = randomDate.withZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli(); + assertEquals(millis, fields[0].numericValue().longValue()); } public void testMergeDate() throws IOException { @@ -429,6 +407,6 @@ public void testIllegalFormatField() throws Exception { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parser.parse("type", new CompressedXContent(mapping))); - assertEquals("Invalid format: [[test_format]]: expected string value", e.getMessage()); + assertEquals("Invalid format: [[test_format]]: Unknown pattern letter: t", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index 072170aff09dd..d4058d50f74a2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; @@ -45,6 +46,7 @@ import org.junit.Before; import java.io.IOException; +import java.time.ZoneOffset; import java.util.Locale; public class DateFieldTypeTests extends FieldTypeTestCase { @@ -67,7 +69,7 @@ public void modify(MappedFieldType ft) { addModifier(new Modifier("locale", false) { @Override public void modify(MappedFieldType ft) { - ((DateFieldType) ft).setDateTimeFormatter(DateFormatter.forPattern("date_optional_time").withLocale(Locale.CANADA)); + ((DateFieldType) ft).setDateTimeFormatter(DateFormatter.forPattern("strict_date_optional_time").withLocale(Locale.CANADA)); } }); nowInMillis = randomNonNegativeLong(); @@ -110,8 +112,10 @@ private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader, public void testIsFieldWithinQuery() throws IOException { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); - long instant1 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda("2015-10-12").getMillis(); - long instant2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda("2016-04-03").getMillis(); + long instant1 = + DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-12")).toInstant().toEpochMilli(); + long instant2 = + DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2016-04-03")).toInstant().toEpochMilli(); Document doc = new Document(); LongPoint field = new LongPoint("my_date", instant1); doc.add(field); @@ -138,25 +142,27 @@ public void testIsFieldWithinQuery() throws IOException { public void testValueFormat() { MappedFieldType ft = createDefaultFieldType(); - long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda("2015-10-12T14:10:55").getMillis(); + long instant = DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-12T14:10:55")) + .toInstant().toEpochMilli(); + assertEquals("2015-10-12T14:10:55.000Z", - ft.docValueFormat(null, DateTimeZone.UTC).format(instant)); + ft.docValueFormat(null, ZoneOffset.UTC).format(instant)); assertEquals("2015-10-12T15:10:55.000+01:00", - ft.docValueFormat(null, DateTimeZone.forOffsetHours(1)).format(instant)); + ft.docValueFormat(null, ZoneOffset.ofHours(1)).format(instant)); assertEquals("2015", - createDefaultFieldType().docValueFormat("yyyy", DateTimeZone.UTC).format(instant)); + createDefaultFieldType().docValueFormat("YYYY", ZoneOffset.UTC).format(instant)); assertEquals(instant, - ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12T14:10:55", false, null)); + ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12T14:10:55", false, null)); assertEquals(instant + 999, - ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12T14:10:55", true, null)); - assertEquals(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda("2015-10-13").getMillis() - 1, - ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12||/d", true, null)); + ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12T14:10:55", true, null)); + long i = DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-13")).toInstant().toEpochMilli(); + assertEquals(i - 1, ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12||/d", true, null)); } public void testValueForSearch() { MappedFieldType ft = createDefaultFieldType(); String date = "2015-10-12T12:09:55.000Z"; - long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date).getMillis(); + long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(date); assertEquals(date, ft.valueForDisplay(instant)); } @@ -170,7 +176,7 @@ public void testTermQuery() { MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); String date = "2015-10-12T14:10:55"; - long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date).getMillis(); + long instant = DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)).toInstant().toEpochMilli(); ft.setIndexOptions(IndexOptions.DOCS); Query expected = new IndexOrDocValuesQuery( LongPoint.newRangeQuery("field", instant, instant + 999), @@ -193,8 +199,9 @@ public void testRangeQuery() throws IOException { ft.setName("field"); String date1 = "2015-10-12T14:10:55"; String date2 = "2016-04-28T11:33:52"; - long instant1 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date1).getMillis(); - long instant2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date2).getMillis() + 999; + long instant1 = DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date1)).toInstant().toEpochMilli(); + long instant2 = + DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date2)).toInstant().toEpochMilli() + 999; ft.setIndexOptions(IndexOptions.DOCS); Query expected = new IndexOrDocValuesQuery( LongPoint.newRangeQuery("field", instant1, instant2), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 56e6f5e4c6b04..b3539d9994334 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; +import java.time.Instant; import java.util.Collection; import java.util.Collections; @@ -455,7 +456,7 @@ public void testReuseExistingMappings() throws IOException, Exception { .field("my_field3", 44) .field("my_field4", 45) .field("my_field5", 46) - .field("my_field6", 47) + .field("my_field6", Instant.now().toEpochMilli()) .field("my_field7", true) .endObject()); Mapper myField1Mapper = null; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index ff44dec81d962..0b066fbd7162d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -60,8 +60,6 @@ public void testMatchTypeOnly() throws Exception { assertThat(mapperService.fullName("l"), notNullValue()); assertNotSame(IndexOptions.NONE, mapperService.fullName("l").indexOptions()); - - } public void testSimple() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index fcb78f66add5e..65dcd396ed740 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -458,7 +458,7 @@ public void testIllegalFormatField() throws Exception { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parser.parse("type", new CompressedXContent(mapping))); - assertEquals("Invalid format: [[test_format]]: expected string value", e.getMessage()); + assertEquals("Invalid format: [[test_format]]: Unknown pattern letter: t", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java index 34e7081d51d5d..699f85f1b12b1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java @@ -104,11 +104,12 @@ public void testDateRangeQuery() throws Exception { DateMathParser parser = type.dateMathParser; Query query = new QueryStringQueryBuilder(DATE_RANGE_FIELD_NAME + ":[2010-01-01 TO 2018-01-01]").toQuery(createShardContext()); Query range = LongRange.newIntersectsQuery(DATE_RANGE_FIELD_NAME, - new long[]{ parser.parse("2010-01-01", () -> 0)}, new long[]{ parser.parse("2018-01-01", () -> 0)}); + new long[]{ parser.parse("2010-01-01", () -> 0).toEpochMilli()}, + new long[]{ parser.parse("2018-01-01", () -> 0).toEpochMilli()}); Query dv = RangeFieldMapper.RangeType.DATE.dvRangeQuery(DATE_RANGE_FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.INTERSECTS, - parser.parse("2010-01-01", () -> 0), - parser.parse("2018-01-01", () -> 0), true, true); + parser.parse("2010-01-01", () -> 0).toEpochMilli(), + parser.parse("2018-01-01", () -> 0).toEpochMilli(), true, true); assertEquals(new IndexOrDocValuesQuery(range, dv), query); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java index f04a193ef96b2..6ca98fb4db6d2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java @@ -48,6 +48,8 @@ import java.net.InetAddress; import java.util.Locale; +import static org.hamcrest.Matchers.containsString; + public class RangeFieldTypeTests extends FieldTypeTestCase { RangeType type; protected static String FIELDNAME = "field"; @@ -111,17 +113,18 @@ public void testDateRangeQueryUsingMappingFormat() { fieldType.setHasDocValues(false); ShapeRelation relation = randomFrom(ShapeRelation.values()); - // dates will break the default format + // dates will break the default format, month/day of month is turned around in the format final String from = "2016-15-06T15:29:50+08:00"; final String to = "2016-16-06T15:29:50+08:00"; ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, () -> fieldType.rangeQuery(from, to, true, true, relation, null, null, context)); - assertEquals("failed to parse date field [2016-15-06T15:29:50+08:00] with format [strict_date_optional_time||epoch_millis]", - ex.getMessage()); + assertThat(ex.getMessage(), + containsString("failed to parse date field [2016-15-06T15:29:50+08:00] with format [strict_date_optional_time||epoch_millis]") + ); // setting mapping format which is compatible with those dates - final DateFormatter formatter = DateFormatter.forPattern("yyyy-dd-MM'T'HH:mm:ssZZ"); + final DateFormatter formatter = DateFormatter.forPattern("yyyy-dd-MM'T'HH:mm:ssZZZZZ"); assertEquals(1465975790000L, formatter.parseMillis(from)); assertEquals(1466062190000L, formatter.parseMillis(to)); diff --git a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java index 5c76c77b5c888..257ee807419b6 100644 --- a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java @@ -140,6 +140,11 @@ public void testEqualsAndHashcode() { } } + public static InnerHitBuilder randomNestedInnerHits() { + InnerHitBuilder innerHitBuilder = randomInnerHits(); + innerHitBuilder.setSeqNoAndPrimaryTerm(false); // not supported by nested queries + return innerHitBuilder; + } public static InnerHitBuilder randomInnerHits() { InnerHitBuilder innerHits = new InnerHitBuilder(); innerHits.setName(randomAlphaOfLengthBetween(1, 16)); @@ -147,6 +152,7 @@ public static InnerHitBuilder randomInnerHits() { innerHits.setSize(randomIntBetween(0, 32)); innerHits.setExplain(randomBoolean()); innerHits.setVersion(randomBoolean()); + innerHits.setSeqNoAndPrimaryTerm(randomBoolean()); innerHits.setTrackScores(randomBoolean()); if (randomBoolean()) { innerHits.setStoredFieldNames(randomListStuff(16, () -> randomAlphaOfLengthBetween(1, 16))); @@ -189,6 +195,7 @@ static InnerHitBuilder mutate(InnerHitBuilder original) throws IOException { modifiers.add(() -> copy.setSize(randomValueOtherThan(copy.getSize(), () -> randomIntBetween(0, 128)))); modifiers.add(() -> copy.setExplain(!copy.isExplain())); modifiers.add(() -> copy.setVersion(!copy.isVersion())); + modifiers.add(() -> copy.setSeqNoAndPrimaryTerm(!copy.isSeqNoAndPrimaryTerm())); modifiers.add(() -> copy.setTrackScores(!copy.isTrackScores())); modifiers.add(() -> copy.setName(randomValueOtherThan(copy.getName(), () -> randomAlphaOfLengthBetween(1, 16)))); modifiers.add(() -> { diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 76479791283b4..ac9ae8d0fa7fb 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -45,7 +45,7 @@ import java.util.Map; import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta; -import static org.elasticsearch.index.query.InnerHitBuilderTests.randomInnerHits; +import static org.elasticsearch.index.query.InnerHitBuilderTests.randomNestedInnerHits; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -267,7 +267,7 @@ public void testThatUnrecognizedFromStringThrowsException() { } public void testInlineLeafInnerHitsNestedQuery() throws Exception { - InnerHitBuilder leafInnerHits = randomInnerHits(); + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None); nestedQueryBuilder.innerHit(leafInnerHits); Map innerHitBuilders = new HashMap<>(); @@ -276,7 +276,7 @@ public void testInlineLeafInnerHitsNestedQuery() throws Exception { } public void testInlineLeafInnerHitsNestedQueryViaBoolQuery() { - InnerHitBuilder leafInnerHits = randomInnerHits(); + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None) .innerHit(leafInnerHits); BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder().should(nestedQueryBuilder); @@ -286,7 +286,7 @@ public void testInlineLeafInnerHitsNestedQueryViaBoolQuery() { } public void testInlineLeafInnerHitsNestedQueryViaConstantScoreQuery() { - InnerHitBuilder leafInnerHits = randomInnerHits(); + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None) .innerHit(leafInnerHits); ConstantScoreQueryBuilder constantScoreQueryBuilder = new ConstantScoreQueryBuilder(nestedQueryBuilder); @@ -296,10 +296,10 @@ public void testInlineLeafInnerHitsNestedQueryViaConstantScoreQuery() { } public void testInlineLeafInnerHitsNestedQueryViaBoostingQuery() { - InnerHitBuilder leafInnerHits1 = randomInnerHits(); + InnerHitBuilder leafInnerHits1 = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder1 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None) .innerHit(leafInnerHits1); - InnerHitBuilder leafInnerHits2 = randomInnerHits(); + InnerHitBuilder leafInnerHits2 = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder2 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None) .innerHit(leafInnerHits2); BoostingQueryBuilder constantScoreQueryBuilder = new BoostingQueryBuilder(nestedQueryBuilder1, nestedQueryBuilder2); @@ -310,7 +310,7 @@ public void testInlineLeafInnerHitsNestedQueryViaBoostingQuery() { } public void testInlineLeafInnerHitsNestedQueryViaFunctionScoreQuery() { - InnerHitBuilder leafInnerHits = randomInnerHits(); + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None) .innerHit(leafInnerHits); FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(nestedQueryBuilder); @@ -330,7 +330,7 @@ public void testBuildIgnoreUnmappedNestQuery() throws Exception { when(mapperService.getIndexSettings()).thenReturn(settings); when(searchContext.mapperService()).thenReturn(mapperService); - InnerHitBuilder leafInnerHits = randomInnerHits(); + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder query1 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None); query1.innerHit(leafInnerHits); final Map innerHitBuilders = new HashMap<>(); diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 0eb6de7da252f..6f72277007dd5 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -64,9 +64,10 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.Matchers; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.DateTimeException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -177,7 +178,7 @@ protected QueryStringQueryBuilder doCreateTestQueryBuilder() { queryStringQueryBuilder.minimumShouldMatch(randomMinimumShouldMatch()); } if (randomBoolean()) { - queryStringQueryBuilder.timeZone(randomDateTimeZone().getID()); + queryStringQueryBuilder.timeZone(randomZone().getId()); } if (randomBoolean()) { queryStringQueryBuilder.autoGenerateSynonymsPhraseQuery(randomBoolean()); @@ -211,7 +212,7 @@ public QueryStringQueryBuilder mutateInstance(QueryStringQueryBuilder instance) String quoteFieldSuffix = instance.quoteFieldSuffix(); Float tieBreaker = instance.tieBreaker(); String minimumShouldMatch = instance.minimumShouldMatch(); - String timeZone = instance.timeZone() == null ? null : instance.timeZone().getID(); + String timeZone = instance.timeZone() == null ? null : instance.timeZone().getId(); boolean autoGenerateSynonymsPhraseQuery = instance.autoGenerateSynonymsPhraseQuery(); boolean fuzzyTranspositions = instance.fuzzyTranspositions(); @@ -319,12 +320,12 @@ public QueryStringQueryBuilder mutateInstance(QueryStringQueryBuilder instance) break; case 20: if (timeZone == null) { - timeZone = randomDateTimeZone().getID(); + timeZone = randomZone().getId(); } else { if (randomBoolean()) { timeZone = null; } else { - timeZone = randomValueOtherThan(timeZone, () -> randomDateTimeZone().getID()); + timeZone = randomValueOtherThan(timeZone, () -> randomZone().getId()); } } break; @@ -848,7 +849,7 @@ public void testTimezone() throws Exception { QueryBuilder queryBuilder = parseQuery(queryAsString); assertThat(queryBuilder, instanceOf(QueryStringQueryBuilder.class)); QueryStringQueryBuilder queryStringQueryBuilder = (QueryStringQueryBuilder) queryBuilder; - assertThat(queryStringQueryBuilder.timeZone(), equalTo(DateTimeZone.forID("Europe/Paris"))); + assertThat(queryStringQueryBuilder.timeZone(), equalTo(ZoneId.of("Europe/Paris"))); String invalidQueryAsString = "{\n" + " \"query_string\":{\n" + @@ -856,7 +857,7 @@ public void testTimezone() throws Exception { " \"query\":\"" + DATE_FIELD_NAME + ":[2012 TO 2014]\"\n" + " }\n" + "}"; - expectThrows(IllegalArgumentException.class, () -> parseQuery(invalidQueryAsString)); + expectThrows(DateTimeException.class, () -> parseQuery(invalidQueryAsString)); } public void testToQueryBooleanQueryMultipleBoosts() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index df312ba84c309..52f2c89d645f9 100644 --- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -44,10 +44,12 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import org.joda.time.chrono.ISOChronology; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.HashMap; import java.util.Map; @@ -72,18 +74,22 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { break; case 1: // use mapped date field, using date string representation + Instant now = Instant.now(); + ZonedDateTime start = now.minusMillis(randomIntBetween(0, 1000000)).atZone(ZoneOffset.UTC); + ZonedDateTime end = now.plusMillis(randomIntBetween(0, 1000000)).atZone(ZoneOffset.UTC); query = new RangeQueryBuilder(randomFrom( DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, DATE_ALIAS_FIELD_NAME)); - query.from(new DateTime(System.currentTimeMillis() - randomIntBetween(0, 1000000), DateTimeZone.UTC).toString()); - query.to(new DateTime(System.currentTimeMillis() + randomIntBetween(0, 1000000), DateTimeZone.UTC).toString()); + query.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(start)); + query.to(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(end)); // Create timestamp option only then we have a date mapper, // otherwise we could trigger exception. if (createShardContext().getMapperService().fullName(DATE_FIELD_NAME) != null) { if (randomBoolean()) { - query.timeZone(randomDateTimeZone().getID()); + query.timeZone(randomZone().getId()); } if (randomBoolean()) { - query.format("yyyy-MM-dd'T'HH:mm:ss.SSSZZ"); + String format = "strict_date_optional_time"; + query.format(format); } } break; @@ -444,7 +450,7 @@ protected MappedFieldType.Relation getRelation(QueryRewriteContext queryRewriteC DateTime queryToValue = new DateTime(2016, 1, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); query.from(queryFromValue); query.to(queryToValue); - query.timeZone(randomDateTimeZone().getID()); + query.timeZone(randomZone().getId()); query.format("yyyy-MM-dd"); QueryShardContext queryShardContext = createShardContext(); QueryBuilder rewritten = query.rewrite(queryShardContext); diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 5fca37a71886d..f67ce8de1422a 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkShardRequest; @@ -198,13 +199,14 @@ public IndexResult index(Index op) throws IOException { Future fut = shards.asyncRecoverReplica(replica, (shard, node) -> new RecoveryTarget(shard, node, recoveryListener, v -> {}){ @Override - public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, + ActionListener listener) { try { indexedOnPrimary.await(); } catch (InterruptedException e) { throw new AssertionError(e); } - super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps); + super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps, listener); } }); fut.get(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 0d58239fd4a56..1ea90ec6e8fbc 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2573,8 +2573,8 @@ public void testRefreshListenersDuringPeerRecovery() throws IOException { }) { // we're only checking that listeners are called when the engine is open, before there is no point @Override - public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { - super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps); + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { + super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps, listener); assertListenerCalled.accept(replica); } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 6d1db852a85a4..7e13b38fd3d25 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.cache.request.RequestCacheStats; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -34,8 +35,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.joda.time.DateTimeZone; +import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; @@ -68,7 +69,7 @@ public void testCacheAggs() throws Exception { // which used to not work well with the query cache because of the handles stream output // see #9500 final SearchResponse r1 = client.prepareSearch("index").setSize(0).setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation(dateHistogram("histo").field("f").timeZone(DateTimeZone.forID("+01:00")).minDocCount(0) + .addAggregation(dateHistogram("histo").field("f").timeZone(ZoneId.of("+01:00")).minDocCount(0) .dateHistogramInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(r1); @@ -80,7 +81,7 @@ public void testCacheAggs() throws Exception { for (int i = 0; i < 10; ++i) { final SearchResponse r2 = client.prepareSearch("index").setSize(0) .setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(dateHistogram("histo").field("f") - .timeZone(DateTimeZone.forID("+01:00")).minDocCount(0).dateHistogramInterval(DateHistogramInterval.MONTH)) + .timeZone(ZoneId.of("+01:00")).minDocCount(0).dateHistogramInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(r2); Histogram h1 = r1.getAggregations().get("histo"); @@ -246,15 +247,16 @@ public void testQueryRewriteDatesWithNow() throws Exception { assertAcked(client.admin().indices().prepareCreate("index-3").addMapping("type", "d", "type=date") .setSettings(settings).get()); ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); - indexRandom(true, client.prepareIndex("index-1", "type", "1").setSource("d", now), - client.prepareIndex("index-1", "type", "2").setSource("d", now.minusDays(1)), - client.prepareIndex("index-1", "type", "3").setSource("d", now.minusDays(2)), - client.prepareIndex("index-2", "type", "4").setSource("d", now.minusDays(3)), - client.prepareIndex("index-2", "type", "5").setSource("d", now.minusDays(4)), - client.prepareIndex("index-2", "type", "6").setSource("d", now.minusDays(5)), - client.prepareIndex("index-3", "type", "7").setSource("d", now.minusDays(6)), - client.prepareIndex("index-3", "type", "8").setSource("d", now.minusDays(7)), - client.prepareIndex("index-3", "type", "9").setSource("d", now.minusDays(8))); + DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time"); + indexRandom(true, client.prepareIndex("index-1", "type", "1").setSource("d", formatter.format(now)), + client.prepareIndex("index-1", "type", "2").setSource("d", formatter.format(now.minusDays(1))), + client.prepareIndex("index-1", "type", "3").setSource("d", formatter.format(now.minusDays(2))), + client.prepareIndex("index-2", "type", "4").setSource("d", formatter.format(now.minusDays(3))), + client.prepareIndex("index-2", "type", "5").setSource("d", formatter.format(now.minusDays(4))), + client.prepareIndex("index-2", "type", "6").setSource("d", formatter.format(now.minusDays(5))), + client.prepareIndex("index-3", "type", "7").setSource("d", formatter.format(now.minusDays(6))), + client.prepareIndex("index-3", "type", "8").setSource("d", formatter.format(now.minusDays(7))), + client.prepareIndex("index-3", "type", "9").setSource("d", formatter.format(now.minusDays(8)))); ensureSearchable("index-1", "index-2", "index-3"); assertCacheState(client, "index-1", 0, 0); assertCacheState(client, "index-2", 0, 0); diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index b9e0bd13f3568..9d7f3d5e253cf 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -20,11 +20,13 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -38,6 +40,8 @@ import java.util.Map; import java.util.concurrent.ExecutionException; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testModificationPreventsFlushing() throws InterruptedException { @@ -130,22 +134,26 @@ public void testSyncFailsIfOperationIsInFlight() throws InterruptedException, Ex } public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException { - createIndex("test"); + createIndex("test", Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build()); IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); + final IndexShard shard = test.getShardOrNull(0); + assertNotNull(shard); + final ShardId shardId = shard.shardId(); + + final SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(new ShardId("test", "_na_", 1), listener); + flushService.attemptSyncedFlush(new ShardId(shard.shardId().getIndex(), 1), listener); listener.latch.await(); assertNotNull(listener.error); assertNull(listener.result); assertEquals(ShardNotFoundException.class, listener.error.getClass()); assertEquals("no such shard", listener.error.getMessage()); - final ShardId shardId = shard.shardId(); - - client().admin().indices().prepareClose("test").get(); + assertAcked(client().admin().indices().prepareClose("test")); listener = new SyncedFlushUtil.LatchedListener(); flushService.attemptSyncedFlush(shardId, listener); listener.latch.await(); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 0cecc925b2488..cd495b7e17bee 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -491,9 +491,9 @@ public SendFileResult phase1(final IndexCommit snapshot, final Supplier } @Override - TimeValue prepareTargetForTranslog(final boolean fileBasedRecovery, final int totalTranslogOps) throws IOException { + void prepareTargetForTranslog(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { prepareTargetForTranslogCalled.set(true); - return super.prepareTargetForTranslog(fileBasedRecovery, totalTranslogOps); + super.prepareTargetForTranslog(fileBasedRecovery, totalTranslogOps, listener); } @Override @@ -700,7 +700,7 @@ private List generateFiles(Store store, int numFiles, IntSupp class TestRecoveryTargetHandler implements RecoveryTargetHandler { @Override - public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) { + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { } @Override diff --git a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 6a77da8738c4e..4302549e2f1fd 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -136,7 +136,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { builder.nodes(DiscoveryNodes.builder(currentState.nodes()).remove("_non_existent")); currentState = builder.build(); - return allocationService.deassociateDeadNodes(currentState, true, "reroute"); + return allocationService.disassociateDeadNodes(currentState, true, "reroute"); } diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java index ebf77d1e80360..c3863e8ec08e4 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java @@ -20,7 +20,9 @@ package org.elasticsearch.persistent; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -63,10 +65,13 @@ import static org.elasticsearch.persistent.PersistentTasksClusterService.persistentTasksChanged; import static org.elasticsearch.persistent.PersistentTasksExecutor.NO_NODE_FOUND; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.doAnswer; @@ -464,6 +469,56 @@ public void testPeriodicRecheck() throws Exception { }); } + public void testUnassignTask() { + ClusterState clusterState = initialState(); + ClusterState.Builder builder = ClusterState.builder(clusterState); + PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder( + clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE)); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("_node_1", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("_node_1") + .masterNodeId("_node_1") + .add(new DiscoveryNode("_node_2", buildNewFakeTransportAddress(), Version.CURRENT)); + + String unassignedId = addTask(tasks, "unassign", "_node_2"); + + MetaData.Builder metaData = MetaData.builder(clusterState.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build()); + clusterState = builder.metaData(metaData).nodes(nodes).build(); + setState(clusterService, clusterState); + PersistentTasksClusterService service = createService((params, currentState) -> + new Assignment("_node_2", "test")); + service.unassignPersistentTask(unassignedId, tasks.getLastAllocationId(), "unassignment test", ActionListener.wrap( + task -> { + assertThat(task.getAssignment().getExecutorNode(), is(nullValue())); + assertThat(task.getId(), equalTo(unassignedId)); + assertThat(task.getAssignment().getExplanation(), equalTo("unassignment test")); + }, + e -> fail() + )); + } + + public void testUnassignNonExistentTask() { + ClusterState clusterState = initialState(); + ClusterState.Builder builder = ClusterState.builder(clusterState); + PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder( + clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE)); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("_node_1", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("_node_1") + .masterNodeId("_node_1") + .add(new DiscoveryNode("_node_2", buildNewFakeTransportAddress(), Version.CURRENT)); + + MetaData.Builder metaData = MetaData.builder(clusterState.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build()); + clusterState = builder.metaData(metaData).nodes(nodes).build(); + setState(clusterService, clusterState); + PersistentTasksClusterService service = createService((params, currentState) -> + new Assignment("_node_2", "test")); + service.unassignPersistentTask("missing-task", tasks.getLastAllocationId(), "unassignment test", ActionListener.wrap( + task -> fail(), + e -> assertThat(e, instanceOf(ResourceNotFoundException.class)) + )); + } + private ClusterService createRecheckTestClusterService(ClusterState initialState, boolean shouldSimulateFailure) { AtomicBoolean testFailureNextTime = new AtomicBoolean(shouldSimulateFailure); AtomicReference state = new AtomicReference<>(initialState); @@ -728,9 +783,11 @@ private ClusterState.Builder addRandomTask(ClusterState.Builder clusterStateBuil tasks.addTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams(param), assignment).build())); } - private void addTask(PersistentTasksCustomMetaData.Builder tasks, String param, String node) { - tasks.addTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams(param), + private String addTask(PersistentTasksCustomMetaData.Builder tasks, String param, String node) { + String id = UUIDs.base64UUID(); + tasks.addTask(id, TestPersistentTasksExecutor.NAME, new TestParams(param), new Assignment(node, "explanation: " + param)); + return id; } private DiscoveryNode newNode(String nodeId) { diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java index 2a180cc12dd19..9118fad66b0dd 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java @@ -21,10 +21,14 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData.Custom; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -33,9 +37,11 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -65,6 +71,8 @@ import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializationTestCase { @@ -307,6 +315,91 @@ public void testFeatureSerialization() throws IOException { assertThat(read.taskMap().keySet(), equalTo(Collections.singleton("test_compatible"))); } + public void testDisassociateDeadNodes_givenNoPersistentTasks() { + ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")).build(); + ClusterState returnedState = PersistentTasksCustomMetaData.disassociateDeadNodes(originalState); + assertThat(originalState, sameInstance(returnedState)); + } + + public void testDisassociateDeadNodes_givenAssignedPersistentTask() { + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("node1") + .masterNodeId("node1") + .build(); + + String taskName = "test/task"; + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder() + .addTask("task-id", taskName, emptyTaskParams(taskName), + new PersistentTasksCustomMetaData.Assignment("node1", "test assignment")); + + ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")) + .nodes(nodes) + .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) + .build(); + ClusterState returnedState = PersistentTasksCustomMetaData.disassociateDeadNodes(originalState); + assertThat(originalState, sameInstance(returnedState)); + + PersistentTasksCustomMetaData originalTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(originalState); + PersistentTasksCustomMetaData returnedTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(returnedState); + assertEquals(originalTasks, returnedTasks); + } + + public void testDisassociateDeadNodes() { + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("node1") + .masterNodeId("node1") + .build(); + + String taskName = "test/task"; + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder() + .addTask("assigned-task", taskName, emptyTaskParams(taskName), + new PersistentTasksCustomMetaData.Assignment("node1", "test assignment")) + .addTask("task-on-deceased-node", taskName, emptyTaskParams(taskName), + new PersistentTasksCustomMetaData.Assignment("left-the-cluster", "test assignment")); + + ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")) + .nodes(nodes) + .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) + .build(); + ClusterState returnedState = PersistentTasksCustomMetaData.disassociateDeadNodes(originalState); + assertThat(originalState, not(sameInstance(returnedState))); + + PersistentTasksCustomMetaData originalTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(originalState); + PersistentTasksCustomMetaData returnedTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(returnedState); + assertNotEquals(originalTasks, returnedTasks); + + assertEquals(originalTasks.getTask("assigned-task"), returnedTasks.getTask("assigned-task")); + assertNotEquals(originalTasks.getTask("task-on-deceased-node"), returnedTasks.getTask("task-on-deceased-node")); + assertEquals(PersistentTasksCustomMetaData.LOST_NODE_ASSIGNMENT, returnedTasks.getTask("task-on-deceased-node").getAssignment()); + } + + private PersistentTaskParams emptyTaskParams(String taskName) { + return new PersistentTaskParams() { + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) { + return builder; + } + + @Override + public void writeTo(StreamOutput out) { + + } + + @Override + public String getWriteableName() { + return taskName; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + }; + } + private Assignment randomAssignment() { if (randomBoolean()) { if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 8f6393986da9d..08c32665adc58 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -46,8 +46,10 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2) public class PersistentTasksExecutorIT extends ESIntegTestCase { @@ -155,11 +157,8 @@ public void testPersistentActionWithNoAvailableNode() throws Exception { Settings nodeSettings = Settings.builder().put(nodeSettings(0)).put("node.attr.test_attr", "test").build(); String newNode = internalCluster().startNode(nodeSettings); String newNodeId = internalCluster().clusterService(newNode).localNode().getId(); - assertBusy(() -> { - // Wait for the task to start - assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks() - .size(), equalTo(1)); - }); + waitForTaskToStart(); + TaskInfo taskInfo = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") .get().getTasks().get(0); @@ -199,11 +198,7 @@ public void testPersistentActionWithNonClusterStateCondition() throws Exception TestPersistentTasksExecutor.setNonClusterStateCondition(true); - assertBusy(() -> { - // Wait for the task to start - assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks() - .size(), equalTo(1)); - }); + waitForTaskToStart(); TaskInfo taskInfo = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") .get().getTasks().get(0); @@ -221,12 +216,7 @@ public void testPersistentActionStatusUpdate() throws Exception { PlainActionFuture> future = new PlainActionFuture<>(); persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); String taskId = future.get().getId(); - - assertBusy(() -> { - // Wait for the task to start - assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks() - .size(), equalTo(1)); - }); + waitForTaskToStart(); TaskInfo firstRunningTask = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") .get().getTasks().get(0); @@ -307,6 +297,62 @@ public void testCreatePersistentTaskWithDuplicateId() throws Exception { }); } + public void testUnassignRunningPersistentTask() throws Exception { + PersistentTasksClusterService persistentTasksClusterService = + internalCluster().getInstance(PersistentTasksClusterService.class, internalCluster().getMasterName()); + // Speed up rechecks to a rate that is quicker than what settings would allow + persistentTasksClusterService.setRecheckInterval(TimeValue.timeValueMillis(1)); + PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class); + PlainActionFuture> future = new PlainActionFuture<>(); + TestParams testParams = new TestParams("Blah"); + testParams.setExecutorNodeAttr("test"); + persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, future); + PersistentTask task = future.get(); + String taskId = task.getId(); + + Settings nodeSettings = Settings.builder().put(nodeSettings(0)).put("node.attr.test_attr", "test").build(); + internalCluster().startNode(nodeSettings); + + waitForTaskToStart(); + + PlainActionFuture> unassignmentFuture = new PlainActionFuture<>(); + + // Disallow re-assignment after it is unallocated to verify master and node state + TestPersistentTasksExecutor.setNonClusterStateCondition(false); + + persistentTasksClusterService.unassignPersistentTask(taskId, + task.getAllocationId() + 1, + "unassignment test", + unassignmentFuture); + PersistentTask unassignedTask = unassignmentFuture.get(); + assertThat(unassignedTask.getId(), equalTo(taskId)); + assertThat(unassignedTask.getAssignment().getExplanation(), equalTo("unassignment test")); + assertThat(unassignedTask.getAssignment().getExecutorNode(), is(nullValue())); + + assertBusy(() -> { + // Verify that the task is NOT running on the node + List tasks = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get() + .getTasks(); + assertThat(tasks.size(), equalTo(0)); + + // Verify that the task is STILL in internal cluster state + assertClusterStateHasTask(taskId); + }); + + // Allow it to be reassigned again to the same node + TestPersistentTasksExecutor.setNonClusterStateCondition(true); + + // Verify it starts again + waitForTaskToStart(); + + assertClusterStateHasTask(taskId); + + // Complete or cancel the running task + TaskInfo taskInfo = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get().getTasks().get(0); + stopOrCancelTask(taskInfo.getTaskId()); + } + private void stopOrCancelTask(TaskId taskId) { if (randomBoolean()) { logger.info("Completing the running task"); @@ -322,6 +368,25 @@ private void stopOrCancelTask(TaskId taskId) { } } + private static void waitForTaskToStart() throws Exception { + assertBusy(() -> { + // Wait for the task to start + assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks() + .size(), equalTo(1)); + }); + } + + private static void assertClusterStateHasTask(String taskId) { + Collection> clusterTasks = ((PersistentTasksCustomMetaData) internalCluster() + .clusterService() + .state() + .getMetaData() + .custom(PersistentTasksCustomMetaData.TYPE)) + .tasks(); + assertThat(clusterTasks, hasSize(1)); + assertThat(clusterTasks.iterator().next().getId(), equalTo(taskId)); + } + private void assertNoRunningTasks() throws Exception { assertBusy(() -> { // Wait for the task to finish diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingActionTests.java index dcf4237ae07bf..915f8597aa484 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingActionTests.java @@ -40,6 +40,31 @@ public void setUpAction() { new RestGetFieldMappingAction(Settings.EMPTY, controller()); } + public void testIncludeTypeName() { + Map params = new HashMap<>(); + String path; + if (randomBoolean()) { + params.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); + path = "some_index/some_type/_mapping/field/some_field"; + } else { + params.put(INCLUDE_TYPE_NAME_PARAMETER, "false"); + path = "some_index/_mapping/field/some_field"; + } + RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath(path) + .withParams(params) + .build(); + dispatchRequest(deprecatedRequest); + assertWarnings(RestGetFieldMappingAction.TYPES_DEPRECATION_MESSAGE); + + RestRequest validRequest = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("some_index/_mapping/field/some_field") + .build(); + dispatchRequest(validRequest); + } + public void testTypeInPath() { // Test that specifying a type while setting include_type_name to false // results in an illegal argument exception. diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index 81f5c7982d4d8..4486d4ff83ffb 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -29,8 +29,8 @@ import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.List; @@ -60,15 +60,15 @@ public void testSerialization() throws Exception { assertEquals(DocValueFormat.Decimal.class, vf.getClass()); assertEquals("###.##", ((DocValueFormat.Decimal) vf).pattern); - DocValueFormat.DateTime dateFormat = - new DocValueFormat.DateTime(DateFormatter.forPattern("epoch_second"), DateTimeZone.forOffsetHours(1)); + DateFormatter formatter = DateFormatter.forPattern("epoch_second"); + DocValueFormat.DateTime dateFormat = new DocValueFormat.DateTime(formatter, ZoneOffset.ofHours(1)); out = new BytesStreamOutput(); out.writeNamedWriteable(dateFormat); in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry); vf = in.readNamedWriteable(DocValueFormat.class); assertEquals(DocValueFormat.DateTime.class, vf.getClass()); assertEquals("epoch_second", ((DocValueFormat.DateTime) vf).formatter.pattern()); - assertEquals(DateTimeZone.forOffsetHours(1), ((DocValueFormat.DateTime) vf).timeZone); + assertEquals(ZoneOffset.ofHours(1), ((DocValueFormat.DateTime) vf).timeZone); out = new BytesStreamOutput(); out.writeNamedWriteable(DocValueFormat.GEOHASH); diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java index fee55f1e22f23..4831729201183 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java @@ -90,6 +90,11 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp if (randomBoolean()) { hit.version(randomLong()); } + + if (randomBoolean()) { + hit.version(randomNonNegativeLong()); + hit.version(randomLongBetween(1, Long.MAX_VALUE)); + } if (randomBoolean()) { hit.sortValues(SearchSortValuesTests.createTestItem(xContentType, transportSerialization)); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java index 3a10edf183376..a54f30ffac0d1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java @@ -36,7 +36,7 @@ protected AutoDateHistogramAggregationBuilder createTestAggregatorBuilder() { builder.missing(randomIntBetween(0, 10)); } if (randomBoolean()) { - builder.timeZone(randomDateTimeZone()); + builder.timeZone(randomZone()); } return builder; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index f65f2bde9662a..c59be546acd1a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -22,12 +22,12 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateMathParser; -import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.MatchNoneQueryBuilder; @@ -46,13 +46,14 @@ import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; import org.junit.After; import java.io.IOException; +import java.time.Instant; import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -83,21 +84,21 @@ @ESIntegTestCase.SuiteScopeTestCase public class DateHistogramIT extends ESIntegTestCase { - static Map> expectedMultiSortBuckets; + static Map> expectedMultiSortBuckets; - private DateTime date(int month, int day) { - return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC); + private ZonedDateTime date(int month, int day) { + return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC); } - private DateTime date(String date) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date); + private ZonedDateTime date(String date) { + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)); } - private static String format(DateTime date, String pattern) { - return DateTimeFormat.forPattern(pattern).print(date); + private static String format(ZonedDateTime date, String pattern) { + return DateFormatter.forPattern(pattern).format(date); } - private IndexRequestBuilder indexDoc(String idx, DateTime date, int value) throws Exception { + private IndexRequestBuilder indexDoc(String idx, ZonedDateTime date, int value) throws Exception { return client().prepareIndex(idx, "type").setSource(jsonBuilder() .startObject() .timeField("date", date) @@ -142,7 +143,7 @@ public void setupSuiteScopeCluster() throws Exception { ensureSearchable(); } - private void addExpectedBucket(DateTime key, long docCount, double avg, double sum) { + private void addExpectedBucket(ZonedDateTime key, long docCount, double avg, double sum) { Map bucketProps = new HashMap<>(); bucketProps.put("_count", docCount); bucketProps.put("avg_l", avg); @@ -196,13 +197,12 @@ public void afterEachTest() throws IOException { internalCluster().wipeIndices("idx2"); } - private static String getBucketKeyAsString(DateTime key) { - return getBucketKeyAsString(key, DateTimeZone.UTC); + private static String getBucketKeyAsString(ZonedDateTime key) { + return getBucketKeyAsString(key, ZoneOffset.UTC); } - private static String getBucketKeyAsString(DateTime key, DateTimeZone tz) { - ZoneId zoneId = DateUtils.dateTimeZoneToZoneId(tz); - return DateFormatter.forPattern(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.pattern()).withZone(zoneId).formatJoda(key); + private static String getBucketKeyAsString(ZonedDateTime key, ZoneId tz) { + return DateFormatter.forPattern("strict_date_optional_time").withZone(tz).format(key); } public void testSingleValuedField() throws Exception { @@ -218,35 +218,34 @@ public void testSingleValuedField() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } public void testSingleValuedFieldWithTimeZone() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date") - .dateHistogramInterval(DateHistogramInterval.DAY) - .minDocCount(1) - .timeZone(DateTimeZone.forID("+01:00"))).get(); - DateTimeZone tz = DateTimeZone.forID("+01:00"); + .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(1) + .timeZone(ZoneId.of("+01:00"))).execute() + .actionGet(); + ZoneId tz = ZoneId.of("+01:00"); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); @@ -255,46 +254,46 @@ public void testSingleValuedFieldWithTimeZone() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(6)); - DateTime key = new DateTime(2012, 1, 1, 23, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 14, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 3, 1, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 3, 14, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(4); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 3, 22, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(5); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); } @@ -304,7 +303,7 @@ public void testSingleValued_timeZone_epoch() throws Exception { if (randomBoolean()) { format = format + "||date_optional_time"; } - DateTimeZone tz = DateTimeZone.forID("+01:00"); + ZoneId tz = ZoneId.of("+01:00"); SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo").field("date") .dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(1) @@ -318,21 +317,25 @@ public void testSingleValued_timeZone_epoch() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(6)); - List expectedKeys = new ArrayList<>(); - expectedKeys.add(new DateTime(2012, 1, 1, 23, 0, DateTimeZone.UTC)); - expectedKeys.add(new DateTime(2012, 2, 1, 23, 0, DateTimeZone.UTC)); - expectedKeys.add(new DateTime(2012, 2, 14, 23, 0, DateTimeZone.UTC)); - expectedKeys.add(new DateTime(2012, 3, 1, 23, 0, DateTimeZone.UTC)); - expectedKeys.add(new DateTime(2012, 3, 14, 23, 0, DateTimeZone.UTC)); - expectedKeys.add(new DateTime(2012, 3, 22, 23, 0, DateTimeZone.UTC)); - + List expectedKeys = new ArrayList<>(); + expectedKeys.add(ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC)); - Iterator keyIterator = expectedKeys.iterator(); + Iterator keyIterator = expectedKeys.iterator(); for (Histogram.Bucket bucket : buckets) { assertThat(bucket, notNullValue()); - DateTime expectedKey = keyIterator.next(); - assertThat(bucket.getKeyAsString(), equalTo(Long.toString(expectedKey.getMillis() / millisDivider))); - assertThat(((DateTime) bucket.getKey()), equalTo(expectedKey)); + ZonedDateTime expectedKey = keyIterator.next(); + String bucketKey = bucket.getKeyAsString(); + String expectedBucketName = Long.toString(expectedKey.toInstant().toEpochMilli() / millisDivider); + if (JavaVersion.current().getVersion().get(0) == 8 && bucket.getKeyAsString().endsWith(".0")) { + expectedBucketName = expectedBucketName + ".0"; + } + assertThat(bucketKey, equalTo(expectedBucketName)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(expectedKey)); assertThat(bucket.getDocCount(), equalTo(1L)); } } @@ -355,7 +358,7 @@ public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { int i = 0; for (Histogram.Bucket bucket : buckets) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i++; } } @@ -377,7 +380,7 @@ public void testSingleValuedFieldOrderedByKeyDesc() throws Exception { int i = 2; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } } @@ -399,7 +402,7 @@ public void testSingleValuedFieldOrderedByCountAsc() throws Exception { int i = 0; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i++; } } @@ -421,7 +424,7 @@ public void testSingleValuedFieldOrderedByCountDesc() throws Exception { int i = 2; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } } @@ -444,42 +447,42 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)histo).getProperty("_count"); Object[] propertiesCounts = (Object[]) ((InternalAggregation)histo).getProperty("sum.value"); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); Sum sum = bucket.getAggregations().get("sum"); assertThat(sum, notNullValue()); assertThat(sum.getValue(), equalTo(1.0)); - assertThat((DateTime) propertiesKeys[0], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[0], equalTo(key)); assertThat((long) propertiesDocCounts[0], equalTo(1L)); assertThat((double) propertiesCounts[0], equalTo(1.0)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); sum = bucket.getAggregations().get("sum"); assertThat(sum, notNullValue()); assertThat(sum.getValue(), equalTo(5.0)); - assertThat((DateTime) propertiesKeys[1], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[1], equalTo(key)); assertThat((long) propertiesDocCounts[1], equalTo(2L)); assertThat((double) propertiesCounts[1], equalTo(5.0)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); sum = bucket.getAggregations().get("sum"); assertThat(sum, notNullValue()); assertThat(sum.getValue(), equalTo(15.0)); - assertThat((DateTime) propertiesKeys[2], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[2], equalTo(key)); assertThat((long) propertiesDocCounts[2], equalTo(3L)); assertThat((double) propertiesCounts[2], equalTo(15.0)); } @@ -502,7 +505,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { int i = 0; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i++; } } @@ -525,7 +528,7 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception int i = 2; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } } @@ -548,7 +551,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws int i = 2; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } } @@ -625,25 +628,25 @@ public void testSingleValuedFieldWithValueScript() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); - key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -669,32 +672,32 @@ public void testMultiValuedField() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(4)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(5L)); - key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -763,32 +766,32 @@ public void testMultiValuedFieldWithValueScript() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(4)); - DateTime key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); - key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(5L)); - key = new DateTime(2012, 5, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 5, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -817,25 +820,25 @@ public void testScriptSingleValue() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -856,32 +859,32 @@ public void testScriptMultiValued() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(4)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(5L)); - key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -922,25 +925,25 @@ public void testPartiallyUnmapped() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -971,7 +974,7 @@ public void testEmptyAggregation() throws Exception { public void testSingleValueWithTimeZone() throws Exception { prepareCreate("idx2").addMapping("type", "date", "type=date").get(); IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; - DateTime date = date("2014-03-11T00:00:00+00:00"); + ZonedDateTime date = date("2014-03-11T00:00:00+00:00"); for (int i = 0; i < reqs.length; i++) { reqs[i] = client().prepareIndex("idx2", "type", "" + i) .setSource(jsonBuilder().startObject().timeField("date", date).endObject()); @@ -983,9 +986,9 @@ public void testSingleValueWithTimeZone() throws Exception { .setQuery(matchAllQuery()) .addAggregation(dateHistogram("date_histo") .field("date") - .timeZone(DateTimeZone.forID("-02:00")) + .timeZone(ZoneId.of("-02:00")) .dateHistogramInterval(DateHistogramInterval.DAY) - .format("yyyy-MM-dd:HH-mm-ssZZ")) + .format("yyyy-MM-dd:HH-mm-ssZZZZZ")) .get(); assertThat(response.getHits().getTotalHits().value, equalTo(5L)); @@ -1010,8 +1013,9 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { // we're testing on days, so the base must be rounded to a day int interval = randomIntBetween(1, 2); // in days long intervalMillis = interval * 24 * 60 * 60 * 1000; - DateTime base = new DateTime(DateTimeZone.UTC).dayOfMonth().roundFloorCopy(); - DateTime baseKey = new DateTime(intervalMillis * (base.getMillis() / intervalMillis), DateTimeZone.UTC); + ZonedDateTime base = ZonedDateTime.now(ZoneOffset.UTC).withDayOfMonth(1); + ZonedDateTime baseKey = Instant.ofEpochMilli(intervalMillis * (base.toInstant().toEpochMilli() / intervalMillis)) + .atZone(ZoneOffset.UTC); prepareCreate("idx2") .setSettings( @@ -1028,7 +1032,7 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { } else { int docCount = randomIntBetween(1, 3); for (int j = 0; j < docCount; j++) { - DateTime date = baseKey.plusDays(i * interval + randomIntBetween(0, interval - 1)); + ZonedDateTime date = baseKey.plusDays(i * interval + randomIntBetween(0, interval - 1)); builders.add(indexDoc("idx2", date, j)); } docCounts[i] = docCount; @@ -1037,19 +1041,19 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { indexRandom(true, builders); ensureSearchable("idx2"); - DateTime lastDataBucketKey = baseKey.plusDays((numOfBuckets - 1) * interval); + ZonedDateTime lastDataBucketKey = baseKey.plusDays((numOfBuckets - 1) * interval); // randomizing the number of buckets on the min bound // (can sometimes fall within the data range, but more frequently will fall before the data range) int addedBucketsLeft = randomIntBetween(0, numOfBuckets); - DateTime boundsMinKey; + ZonedDateTime boundsMinKey; if (frequently()) { boundsMinKey = baseKey.minusDays(addedBucketsLeft * interval); } else { boundsMinKey = baseKey.plusDays(addedBucketsLeft * interval); addedBucketsLeft = 0; } - DateTime boundsMin = boundsMinKey.plusDays(randomIntBetween(0, interval - 1)); + ZonedDateTime boundsMin = boundsMinKey.plusDays(randomIntBetween(0, interval - 1)); // randomizing the number of buckets on the max bound // (can sometimes fall within the data range, but more frequently will fall after the data range) @@ -1059,8 +1063,8 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { addedBucketsRight = 0; boundsMaxKeyDelta = -boundsMaxKeyDelta; } - DateTime boundsMaxKey = lastDataBucketKey.plusDays(boundsMaxKeyDelta); - DateTime boundsMax = boundsMaxKey.plusDays(randomIntBetween(0, interval - 1)); + ZonedDateTime boundsMaxKey = lastDataBucketKey.plusDays(boundsMaxKeyDelta); + ZonedDateTime boundsMax = boundsMaxKey.plusDays(randomIntBetween(0, interval - 1)); // it could be that the random bounds.min we chose ended up greater than // bounds.max - this should @@ -1105,11 +1109,11 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(bucketsCount)); - DateTime key = baseKey.isBefore(boundsMinKey) ? baseKey : boundsMinKey; + ZonedDateTime key = baseKey.isBefore(boundsMinKey) ? baseKey : boundsMinKey; for (int i = 0; i < bucketsCount; i++) { Histogram.Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getKeyAsString(), equalTo(format(key, pattern))); assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); key = key.plusDays(interval); @@ -1126,15 +1130,15 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { .setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1).put("index.number_of_replicas", 0)) .get(); - DateMathParser parser = Joda.getStrictStandardDateFormatter().toDateMathParser(); + DateMathParser parser = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis").toDateMathParser(); // we pick a random timezone offset of +12/-12 hours and insert two documents // one at 00:00 in that time zone and one at 12:00 List builders = new ArrayList<>(); int timeZoneHourOffset = randomIntBetween(-12, 12); - DateTimeZone timezone = DateTimeZone.forOffsetHours(timeZoneHourOffset); - DateTime timeZoneStartToday = new DateTime(parser.parse("now/d", System::currentTimeMillis, false, timezone), DateTimeZone.UTC); - DateTime timeZoneNoonToday = new DateTime(parser.parse("now/d+12h", System::currentTimeMillis, false, timezone), DateTimeZone.UTC); + ZoneId timezone = ZoneOffset.ofHours(timeZoneHourOffset); + ZonedDateTime timeZoneStartToday = parser.parse("now/d", System::currentTimeMillis, false, timezone).atZone(ZoneOffset.UTC); + ZonedDateTime timeZoneNoonToday = parser.parse("now/d+12h", System::currentTimeMillis, false, timezone).atZone(ZoneOffset.UTC); builders.add(indexDoc(index, timeZoneStartToday, 1)); builders.add(indexDoc(index, timeZoneNoonToday, 2)); indexRandom(true, builders); @@ -1145,7 +1149,7 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { response = client() .prepareSearch(index) .setQuery(QueryBuilders.rangeQuery("date") - .from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getID())) + .from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getId())) .addAggregation( dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.hours(1)) .timeZone(timezone).minDocCount(0).extendedBounds(new ExtendedBounds("now/d", "now/d+23h")) @@ -1164,8 +1168,8 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { for (int i = 0; i < buckets.size(); i++) { Histogram.Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); - assertThat("InternalBucket " + i + " had wrong key", (DateTime) bucket.getKey(), - equalTo(new DateTime(timeZoneStartToday.getMillis() + (i * 60 * 60 * 1000), DateTimeZone.UTC))); + ZonedDateTime zonedDateTime = timeZoneStartToday.plus(i * 60 * 60 * 1000, ChronoUnit.MILLIS); + assertThat("InternalBucket " + i + " had wrong key", (ZonedDateTime) bucket.getKey(), equalTo(zonedDateTime)); if (i == 0 || i == 12) { assertThat(bucket.getDocCount(), equalTo(1L)); } else { @@ -1186,10 +1190,11 @@ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { .get(); List builders = new ArrayList<>(); - builders.add(indexDoc(index, DateTime.parse("2016-01-03T08:00:00.000Z"), 1)); - builders.add(indexDoc(index, DateTime.parse("2016-01-03T08:00:00.000Z"), 2)); - builders.add(indexDoc(index, DateTime.parse("2016-01-06T08:00:00.000Z"), 3)); - builders.add(indexDoc(index, DateTime.parse("2016-01-06T08:00:00.000Z"), 4)); + DateFormatter formatter = DateFormatter.forPattern("date_optional_time"); + builders.add(indexDoc(index, DateFormatters.toZonedDateTime(formatter.parse("2016-01-03T08:00:00.000Z")), 1)); + builders.add(indexDoc(index, DateFormatters.toZonedDateTime(formatter.parse("2016-01-03T08:00:00.000Z")), 2)); + builders.add(indexDoc(index, DateFormatters.toZonedDateTime(formatter.parse("2016-01-06T08:00:00.000Z")), 3)); + builders.add(indexDoc(index, DateFormatters.toZonedDateTime(formatter.parse("2016-01-06T08:00:00.000Z")), 4)); indexRandom(true, builders); ensureSearchable(index); @@ -1233,7 +1238,7 @@ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception { String mappingJson = Strings.toString(jsonBuilder().startObject() .startObject("type").startObject("properties") - .startObject("date").field("type", "date").field("format", "dateOptionalTime||dd-MM-yyyy") + .startObject("date").field("type", "date").field("format", "strict_date_optional_time||dd-MM-yyyy") .endObject().endObject().endObject().endObject()); prepareCreate("idx2").addMapping("type", mappingJson, XContentType.JSON).get(); IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; @@ -1256,23 +1261,23 @@ public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(1)); - DateTime key = new DateTime(2014, 3, 10, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2014, 3, 10, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(5L)); } public void testIssue6965() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").timeZone(DateTimeZone.forID("+01:00")) - .dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0)) + .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("+01:00")) + .dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0)) .get(); assertSearchResponse(response); - DateTimeZone tz = DateTimeZone.forID("+01:00"); + ZoneId tz = ZoneId.of("+01:00"); Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); @@ -1280,25 +1285,25 @@ public void testIssue6965() { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2011, 12, 31, 23, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2011, 12, 31, 23, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 1, 31, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 1, 31, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); - key = new DateTime(2012, 2, 29, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 29, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -1309,7 +1314,7 @@ public void testDSTBoundaryIssue9491() throws InterruptedException, ExecutionExc ensureSearchable("test9491"); SearchResponse response = client().prepareSearch("test9491") .addAggregation(dateHistogram("histo").field("d").dateHistogramInterval(DateHistogramInterval.YEAR) - .timeZone(DateTimeZone.forID("Asia/Jerusalem"))) + .timeZone(ZoneId.of("Asia/Jerusalem")).format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX")) .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); @@ -1327,8 +1332,8 @@ public void testIssue8209() throws InterruptedException, ExecutionException { ensureSearchable("test8209"); SearchResponse response = client().prepareSearch("test8209") .addAggregation(dateHistogram("histo").field("d").dateHistogramInterval(DateHistogramInterval.MONTH) - .timeZone(DateTimeZone.forID("CET")) - .minDocCount(0)) + .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") + .timeZone(ZoneId.of("CET")).minDocCount(0)) .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); @@ -1371,7 +1376,7 @@ public void testFormatIndexUnmapped() throws InterruptedException, ExecutionExce SearchResponse response = client().prepareSearch(indexDateUnmapped) .addAggregation( - dateHistogram("histo").field("dateField").dateHistogramInterval(DateHistogramInterval.MONTH).format("YYYY-MM") + dateHistogram("histo").field("dateField").dateHistogramInterval(DateHistogramInterval.MONTH).format("yyyy-MM") .minDocCount(0).extendedBounds(new ExtendedBounds("2018-01", "2018-01"))) .get(); assertSearchResponse(response); @@ -1393,15 +1398,19 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, indexRandom(true, client().prepareIndex(index, "type").setSource("d", "1477954800000")); ensureSearchable(index); SearchResponse response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d") - .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(DateTimeZone.forID("Europe/Berlin"))).get(); + .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin"))).get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), equalTo(1)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000")); + if (JavaVersion.current().getVersion().get(0) == 8 && histo.getBuckets().get(0).getKeyAsString().endsWith(".0")) { + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000.0")); + } else { + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000")); + } assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d") - .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(DateTimeZone.forID("Europe/Berlin")).format("yyyy-MM-dd")) + .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin")).format("yyyy-MM-dd")) .get(); assertSearchResponse(response); histo = response.getAggregations().get("histo"); @@ -1422,7 +1431,7 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, public void testDSTEndTransition() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(new MatchNoneQueryBuilder()) - .addAggregation(dateHistogram("histo").field("date").timeZone(DateTimeZone.forID("Europe/Oslo")) + .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("Europe/Oslo")) .dateHistogramInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds( new ExtendedBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00"))) .get(); @@ -1430,9 +1439,12 @@ public void testDSTEndTransition() throws Exception { Histogram histo = response.getAggregations().get("histo"); List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(4)); - assertThat(((DateTime) buckets.get(1).getKey()).getMillis() - ((DateTime) buckets.get(0).getKey()).getMillis(), equalTo(3600000L)); - assertThat(((DateTime) buckets.get(2).getKey()).getMillis() - ((DateTime) buckets.get(1).getKey()).getMillis(), equalTo(3600000L)); - assertThat(((DateTime) buckets.get(3).getKey()).getMillis() - ((DateTime) buckets.get(2).getKey()).getMillis(), equalTo(3600000L)); + assertThat(((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - + ((ZonedDateTime) buckets.get(0).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); + assertThat(((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - + ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); + assertThat(((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - + ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); } /** @@ -1443,8 +1455,10 @@ public void testDontCacheScripts() throws Exception { assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=date") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .get()); - indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("d", date(1, 1)), - client().prepareIndex("cache_test_idx", "type", "2").setSource("d", date(2, 1))); + String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1)); + String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1)); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("d", date), + client().prepareIndex("cache_test_idx", "type", "2").setSource("d", date2)); // Make sure we are starting with a clear cache assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() @@ -1514,7 +1528,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound } private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) { - DateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(DateTime[]::new); + ZonedDateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(ZonedDateTime[]::new); SearchResponse response = client() .prepareSearch("sort_idx") .setTypes("type") @@ -1544,7 +1558,7 @@ private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) { } } - private DateTime key(Histogram.Bucket bucket) { - return (DateTime) bucket.getKey(); + private ZonedDateTime key(Histogram.Bucket bucket) { + return (ZonedDateTime) bucket.getKey(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java index 44a9f8c2cb126..080c4faffd696 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -20,18 +20,19 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.test.ESIntegTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import org.junit.After; import org.junit.Before; import java.io.IOException; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.List; -import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -50,9 +51,10 @@ public class DateHistogramOffsetIT extends ESIntegTestCase { private static final String DATE_FORMAT = "yyyy-MM-dd:hh-mm-ss"; + private static final DateFormatter FORMATTER = DateFormatter.forPattern(DATE_FORMAT); - private DateTime date(String date) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date); + private ZonedDateTime date(String date) { + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)); } @Before @@ -65,8 +67,9 @@ public void afterEachTest() throws IOException { internalCluster().wipeIndices("idx2"); } - private void prepareIndex(DateTime date, int numHours, int stepSizeHours, int idxIdStart) - throws IOException, InterruptedException, ExecutionException { + private void prepareIndex(ZonedDateTime date, int numHours, int stepSizeHours, int idxIdStart) + throws IOException, InterruptedException { + IndexRequestBuilder[] reqs = new IndexRequestBuilder[numHours]; for (int i = idxIdStart; i < idxIdStart + reqs.length; i++) { reqs[i - idxIdStart] = client().prepareIndex("idx2", "type", "" + i) @@ -94,8 +97,8 @@ public void testSingleValueWithPositiveOffset() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(2)); - checkBucketFor(buckets.get(0), new DateTime(2014, 3, 10, 2, 0, DateTimeZone.UTC), 2L); - checkBucketFor(buckets.get(1), new DateTime(2014, 3, 11, 2, 0, DateTimeZone.UTC), 3L); + checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 2, 0, 0, 0, ZoneOffset.UTC), 2L); + checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 2, 0, 0, 0, ZoneOffset.UTC), 3L); } public void testSingleValueWithNegativeOffset() throws Exception { @@ -116,8 +119,8 @@ public void testSingleValueWithNegativeOffset() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(2)); - checkBucketFor(buckets.get(0), new DateTime(2014, 3, 9, 22, 0, DateTimeZone.UTC), 2L); - checkBucketFor(buckets.get(1), new DateTime(2014, 3, 10, 22, 0, DateTimeZone.UTC), 3L); + checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 9, 22, 0, 0, 0, ZoneOffset.UTC), 2L); + checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 10, 22, 0, 0, 0, ZoneOffset.UTC), 3L); } /** @@ -143,11 +146,11 @@ public void testSingleValueWithOffsetMinDocCount() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(5)); - checkBucketFor(buckets.get(0), new DateTime(2014, 3, 10, 6, 0, DateTimeZone.UTC), 6L); - checkBucketFor(buckets.get(1), new DateTime(2014, 3, 11, 6, 0, DateTimeZone.UTC), 6L); - checkBucketFor(buckets.get(2), new DateTime(2014, 3, 12, 6, 0, DateTimeZone.UTC), 0L); - checkBucketFor(buckets.get(3), new DateTime(2014, 3, 13, 6, 0, DateTimeZone.UTC), 6L); - checkBucketFor(buckets.get(4), new DateTime(2014, 3, 14, 6, 0, DateTimeZone.UTC), 6L); + checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + checkBucketFor(buckets.get(2), ZonedDateTime.of(2014, 3, 12, 6, 0, 0, 0, ZoneOffset.UTC), 0L); + checkBucketFor(buckets.get(3), ZonedDateTime.of(2014, 3, 13, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + checkBucketFor(buckets.get(4), ZonedDateTime.of(2014, 3, 14, 6, 0, 0, 0, ZoneOffset.UTC), 6L); } /** @@ -155,10 +158,10 @@ public void testSingleValueWithOffsetMinDocCount() throws Exception { * @param key the expected key * @param expectedSize the expected size of the bucket */ - private static void checkBucketFor(Histogram.Bucket bucket, DateTime key, long expectedSize) { + private static void checkBucketFor(Histogram.Bucket bucket, ZonedDateTime key, long expectedSize) { assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(key.toString(DATE_FORMAT))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getKeyAsString(), equalTo(FORMATTER.format(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(expectedSize)); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index 91834de935b4f..f50c0bfd072b1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -18,9 +18,11 @@ */ package org.elasticsearch.search.aggregations.bucket; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -33,9 +35,10 @@ import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -70,12 +73,12 @@ private static IndexRequestBuilder indexDoc(int month, int day, int value) throw .endObject()); } - private static DateTime date(int month, int day) { - return date(month, day, DateTimeZone.UTC); + private static ZonedDateTime date(int month, int day) { + return date(month, day, ZoneOffset.UTC); } - private static DateTime date(int month, int day, DateTimeZone timezone) { - return new DateTime(2012, month, day, 0, 0, timezone); + private static ZonedDateTime date(int month, int day, ZoneId timezone) { + return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, timezone); } private static int numDocs; @@ -128,7 +131,7 @@ public void testDateMath() throws Exception { .prepareSearch("idx") .addAggregation( rangeBuilder.addUnboundedTo("a long time ago", "now-50y").addRange("recently", "now-50y", "now-1y") - .addUnboundedFrom("last year", "now-1y").timeZone(DateTimeZone.forID("EST"))).get(); + .addUnboundedFrom("last year", "now-1y").timeZone(ZoneId.of("Etc/GMT+5"))).get(); assertSearchResponse(response); @@ -176,8 +179,8 @@ public void testSingleValueField() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -185,8 +188,8 @@ public void testSingleValueField() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -194,8 +197,8 @@ public void testSingleValueField() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -222,8 +225,8 @@ public void testSingleValueFieldWithStringDates() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -231,8 +234,8 @@ public void testSingleValueFieldWithStringDates() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -240,8 +243,8 @@ public void testSingleValueFieldWithStringDates() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -269,8 +272,8 @@ public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Excepti Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -278,8 +281,8 @@ public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Excepti bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15-2012-03-15")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15")); assertThat(bucket.getToAsString(), equalTo("2012-03-15")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -287,19 +290,17 @@ public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Excepti bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); } public void testSingleValueFieldWithDateMath() throws Exception { - DateTimeZone timezone = randomDateTimeZone(); - int timeZoneOffset = timezone.getOffset(date(2, 15)); - // if time zone is UTC (or equivalent), time zone suffix is "Z", else something like "+03:00", which we get with the "ZZ" format - String feb15Suffix = timeZoneOffset == 0 ? "Z" : date(2,15, timezone).toString("ZZ"); - String mar15Suffix = timeZoneOffset == 0 ? "Z" : date(3,15, timezone).toString("ZZ"); + ZoneId timezone = randomZone(); + int timeZoneOffset = timezone.getRules().getOffset(date(2, 15).toInstant()).getTotalSeconds(); + String suffix = timezone.equals(ZoneOffset.UTC) ? "Z" : timezone.getId(); long expectedFirstBucketCount = timeZoneOffset < 0 ? 3L : 2L; SearchResponse response = client().prepareSearch("idx") @@ -321,29 +322,29 @@ public void testSingleValueFieldWithDateMath() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + feb15Suffix)); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15, timezone).toDateTime(DateTimeZone.UTC))); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + suffix)); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + suffix)); assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix + - "-2012-03-15T00:00:00.000" + mar15Suffix)); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).toDateTime(DateTimeZone.UTC))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15, timezone).toDateTime(DateTimeZone.UTC))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); + assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + suffix + + "-2012-03-15T00:00:00.000" + suffix)); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + suffix)); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + suffix)); assertThat(bucket.getDocCount(), equalTo(2L)); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix + "-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15, timezone).toDateTime(DateTimeZone.UTC))); - assertThat(((DateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); + assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + suffix + "-*")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + suffix)); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 2L - expectedFirstBucketCount)); } @@ -369,8 +370,8 @@ public void testSingleValueFieldWithCustomKey() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r1")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -378,8 +379,8 @@ public void testSingleValueFieldWithCustomKey() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r2")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -387,8 +388,8 @@ public void testSingleValueFieldWithCustomKey() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r3")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -429,8 +430,8 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r1")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -444,8 +445,8 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r2")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -459,8 +460,8 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r3")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -502,8 +503,8 @@ public void testMultiValuedField() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -511,8 +512,8 @@ public void testMultiValuedField() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(3L)); @@ -520,8 +521,8 @@ public void testMultiValuedField() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 2L)); @@ -557,8 +558,8 @@ public void testMultiValuedFieldWithValueScript() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(1L)); @@ -566,8 +567,8 @@ public void testMultiValuedFieldWithValueScript() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -575,8 +576,8 @@ public void testMultiValuedFieldWithValueScript() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 1L)); @@ -616,8 +617,8 @@ public void testScriptSingleValue() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -625,8 +626,8 @@ public void testScriptSingleValue() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -634,8 +635,8 @@ public void testScriptSingleValue() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -675,8 +676,8 @@ public void testScriptMultiValued() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -684,8 +685,8 @@ public void testScriptMultiValued() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(3L)); @@ -693,8 +694,8 @@ public void testScriptMultiValued() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 2L)); @@ -723,8 +724,8 @@ public void testUnmapped() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -732,8 +733,8 @@ public void testUnmapped() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -741,8 +742,8 @@ public void testUnmapped() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -769,8 +770,8 @@ public void testUnmappedWithStringDates() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -778,8 +779,8 @@ public void testUnmappedWithStringDates() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -787,8 +788,8 @@ public void testUnmappedWithStringDates() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -815,8 +816,8 @@ public void testPartiallyUnmapped() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -824,8 +825,8 @@ public void testPartiallyUnmapped() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -833,8 +834,8 @@ public void testPartiallyUnmapped() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -859,8 +860,8 @@ public void testEmptyAggregation() throws Exception { assertThat(dateRange.getName(), equalTo("date_range")); assertThat(buckets.size(), is(1)); assertThat((String) buckets.get(0).getKey(), equalTo("0-1")); - assertThat(((DateTime) buckets.get(0).getFrom()).getMillis(), equalTo(0L)); - assertThat(((DateTime) buckets.get(0).getTo()).getMillis(), equalTo(1L)); + assertThat(((ZonedDateTime) buckets.get(0).getFrom()).toInstant().toEpochMilli(), equalTo(0L)); + assertThat(((ZonedDateTime) buckets.get(0).getTo()).toInstant().toEpochMilli(), equalTo(1L)); assertThat(buckets.get(0).getDocCount(), equalTo(0L)); assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true)); } @@ -903,7 +904,8 @@ public void testDontCacheScripts() throws Exception { params.put("fieldname", "date"); SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateRange("foo").field("date") .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params)) - .addRange(new DateTime(2012, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC), new DateTime(2013, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC))) + .addRange(ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))) .get(); assertSearchResponse(r); @@ -915,7 +917,8 @@ public void testDontCacheScripts() throws Exception { // To make sure that the cache is working test that a request not using // a script is cached r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateRange("foo").field("date") - .addRange(new DateTime(2012, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC), new DateTime(2013, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC))) + .addRange(ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))) .get(); assertSearchResponse(r); @@ -969,10 +972,9 @@ public void testRangeWithFormatStringValue() throws Exception { assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L); // providing numeric input without format should throw an exception - Exception e = expectThrows(Exception.class, () -> client().prepareSearch(indexName).setSize(0) + ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> client().prepareSearch(indexName).setSize(0) .addAggregation(dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000)).get()); - Throwable cause = e.getCause(); - assertThat(cause.getMessage(), + assertThat(e.getDetailedMessage(), containsString("failed to parse date field [1000000] with format [strict_hour_minute_second]")); } @@ -984,9 +986,9 @@ public void testRangeWithFormatNumericValue() throws Exception { String indexName = "dateformat_numeric_test_idx"; assertAcked(prepareCreate(indexName).addMapping("type", "date", "type=date,format=epoch_second")); indexRandom(true, - client().prepareIndex(indexName, "type", "1").setSource(jsonBuilder().startObject().field("date", 1000).endObject()), + client().prepareIndex(indexName, "type", "1").setSource(jsonBuilder().startObject().field("date", 1002).endObject()), client().prepareIndex(indexName, "type", "2").setSource(jsonBuilder().startObject().field("date", 2000).endObject()), - client().prepareIndex(indexName, "type", "3").setSource(jsonBuilder().startObject().field("date", 3000).endObject())); + client().prepareIndex(indexName, "type", "3").setSource(jsonBuilder().startObject().field("date", 3008).endObject())); // using no format should work when to/from is compatible with format in // mapping @@ -994,39 +996,39 @@ public void testRangeWithFormatNumericValue() throws Exception { .addAggregation(dateRange("date_range").field("date").addRange(1000, 3000).addRange(3000, 4000)).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); List buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + if (JavaVersion.current().getVersion().get(0) == 8) { + assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); + } else { + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + } // using no format should also work when and to/from are string values searchResponse = client().prepareSearch(indexName).setSize(0) .addAggregation(dateRange("date_range").field("date").addRange("1000", "3000").addRange("3000", "4000")).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + if (JavaVersion.current().getVersion().get(0) == 8) { + assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); + } else { + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + } // also e-notation should work, fractional parts should be truncated searchResponse = client().prepareSearch(indexName).setSize(0) .addAggregation(dateRange("date_range").field("date").addRange(1.0e3, 3000.8123).addRange(3000.8123, 4.0e3)).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - - // also e-notation and floats provided as string also be truncated (see: #14641) - searchResponse = client().prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange("1.0e3", "3.0e3").addRange("3.0e3", "4.0e3")).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - - searchResponse = client().prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange("1000.123", "3000.8").addRange("3000.8", "4000.3")).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + if (JavaVersion.current().getVersion().get(0) == 8) { + assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); + } else { + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + } // using different format should work when to/from is compatible with // format in aggregation @@ -1061,8 +1063,8 @@ private static List checkBuckets(Range dateRange, String expectedA private static void assertBucket(Bucket bucket, long bucketSize, String expectedKey, long expectedFrom, long expectedTo) { assertThat(bucket.getDocCount(), equalTo(bucketSize)); assertThat((String) bucket.getKey(), equalTo(expectedKey)); - assertThat(((DateTime) bucket.getFrom()).getMillis(), equalTo(expectedFrom)); - assertThat(((DateTime) bucket.getTo()).getMillis(), equalTo(expectedTo)); + assertThat(((ZonedDateTime) bucket.getFrom()).toInstant().toEpochMilli(), equalTo(expectedFrom)); + assertThat(((ZonedDateTime) bucket.getTo()).toInstant().toEpochMilli(), equalTo(expectedTo)); assertThat(bucket.getAggregations().asList().isEmpty(), is(true)); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java index 1c198fd3ca5d6..34164bc28967c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java @@ -65,7 +65,7 @@ protected DateRangeAggregationBuilder createTestAggregatorBuilder() { factory.missing(randomIntBetween(0, 10)); } if (randomBoolean()) { - factory.timeZone(randomDateTimeZone()); + factory.timeZone(randomZone()); } return factory; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index e7bf0fe4cf700..b09277aca6c6d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -22,11 +22,10 @@ import com.carrotsearch.hppc.LongHashSet; import com.carrotsearch.hppc.LongSet; import com.carrotsearch.randomizedtesting.generators.RandomStrings; - import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -124,7 +123,7 @@ public void setupSuiteScopeCluster() throws Exception { double doubleTerm = longTerm * Math.PI; ZonedDateTime time = ZonedDateTime.of(2014, 1, ((int) longTerm % 20) + 1, 0, 0, 0, 0, ZoneOffset.UTC); - String dateTerm = DateFormatters.forPattern("yyyy-MM-dd").format(time); + String dateTerm = DateFormatter.forPattern("yyyy-MM-dd").format(time); final int frequency = randomBoolean() ? 1 : randomIntBetween(2, 20); for (int j = 0; j < frequency; ++j) { indexRequests.add(client().prepareIndex("idx", "type").setSource(jsonBuilder() diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java index ac985660399d7..d31f7a89b462e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java @@ -42,7 +42,7 @@ private DateHistogramValuesSourceBuilder randomDateHistogramSourceBuilder() { histo.interval(randomNonNegativeLong()); } if (randomBoolean()) { - histo.timeZone(randomDateTimeZone()); + histo.timeZone(randomZone()); } if (randomBoolean()) { histo.missingBucket(true); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index e945eeba519f4..5f219ee6be948 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -39,6 +39,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.IpFieldMapper; @@ -57,12 +58,12 @@ import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.sort.SortOrder; -import org.joda.time.DateTimeZone; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.net.InetAddress; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -1155,8 +1156,7 @@ public void testThatDateHistogramFailsFormatAfter() throws IOException { }, (result) -> {} )); - assertThat(exc.getCause(), instanceOf(IllegalArgumentException.class)); - assertThat(exc.getCause().getMessage(), containsString("Parse failure")); + assertThat(exc.getMessage(), containsString("failed to parse date field [1474329600000]")); } public void testWithDateHistogramAndTimeZone() throws IOException { @@ -1176,7 +1176,7 @@ public void testWithDateHistogramAndTimeZone() throws IOException { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") .dateHistogramInterval(DateHistogramInterval.days(1)) - .timeZone(DateTimeZone.forOffsetHours(1)); + .timeZone(ZoneOffset.ofHours(1)); return new CompositeAggregationBuilder("name", Collections.singletonList(histo)); }, (result) -> { @@ -1196,7 +1196,7 @@ public void testWithDateHistogramAndTimeZone() throws IOException { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") .dateHistogramInterval(DateHistogramInterval.days(1)) - .timeZone(DateTimeZone.forOffsetHours(1)); + .timeZone(ZoneOffset.ofHours(1)); return new CompositeAggregationBuilder("name", Collections.singletonList(histo)) .aggregateAfter(createAfterKey("date", 1474326000000L)); @@ -1835,6 +1835,6 @@ private static Map> createDocument(Object... fields) { } private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis(); + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java index be9a760150427..35bf575d046be 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java @@ -30,10 +30,10 @@ import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; -import org.joda.time.DateTimeZone; import org.junit.After; import java.io.IOException; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -64,7 +64,7 @@ private static DocValueFormat randomDocValueFormat(boolean isLong) { if (isLong) { // we use specific format only for date histogram on a long/date field if (randomBoolean()) { - return new DocValueFormat.DateTime(DateFormatter.forPattern("epoch_second"), DateTimeZone.forOffsetHours(1)); + return new DocValueFormat.DateTime(DateFormatter.forPattern("epoch_second"), ZoneOffset.ofHours(1)); } else { return DocValueFormat.RAW; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java index d92fb7ff62e43..af5f65b9698e4 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.junit.Before; public class FilterAggregatorTests extends AggregatorTestCase { @@ -57,10 +58,12 @@ public void testEmpty() throws Exception { InternalFilter response = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); assertEquals(response.getDocCount(), 0); + assertFalse(AggregationInspectionHelper.hasValue(response)); indexReader.close(); directory.close(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37743") public void testRandom() throws Exception { Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); @@ -96,6 +99,11 @@ public void testRandom() throws Exception { response = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); } assertEquals(response.getDocCount(), (long) expectedBucketCount[value]); + if (expectedBucketCount[expectedBucketCount[value]] > 0) { + assertTrue(AggregationInspectionHelper.hasValue(response)); + } else { + assertFalse(AggregationInspectionHelper.hasValue(response)); + } } indexReader.close(); directory.close(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java index 05ed091978270..ff5cb84482db0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.junit.Before; import java.util.HashSet; @@ -68,6 +69,7 @@ public void testEmpty() throws Exception { for (InternalFilters.InternalBucket filter : response.getBuckets()) { assertEquals(filter.getDocCount(), 0); } + assertFalse(AggregationInspectionHelper.hasValue(response)); indexReader.close(); directory.close(); } @@ -129,6 +131,7 @@ public void testKeyedFilter() throws Exception { assertEquals(filters.getBucketByKey("bar").getDocCount(), 1); assertEquals(filters.getBucketByKey("same").getDocCount(), 1); assertEquals(filters.getBucketByKey("other").getDocCount(), 2); + assertTrue(AggregationInspectionHelper.hasValue(filters)); } indexReader.close(); @@ -185,14 +188,22 @@ public void testRandom() throws Exception { List buckets = response.getBuckets(); assertEquals(buckets.size(), filters.length+1); + int sum = 0; for (InternalFilters.InternalBucket bucket : buckets) { if ("other".equals(bucket.getKey())) { assertEquals(bucket.getDocCount(), expectedOtherCount); } else { int index = Integer.parseInt(bucket.getKey()); assertEquals(bucket.getDocCount(), (long) expectedBucketCount[filterTerms[index]]); + sum += expectedBucketCount[filterTerms[index]]; } } + if (sum > 0) { + assertTrue(AggregationInspectionHelper.hasValue(response)); + } else { + assertFalse(AggregationInspectionHelper.hasValue(response)); + } + } indexReader.close(); directory.close(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java index 45b6b64cddc35..2d270f8298ff1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.ArrayList; @@ -53,6 +54,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, geoHashGrid -> { assertEquals(0, geoHashGrid.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(geoHashGrid)); }); } @@ -61,6 +63,7 @@ public void testFieldMissing() throws IOException { iw.addDocument(Collections.singleton(new LatLonDocValuesField(FIELD_NAME, 10D, 10D))); }, geoHashGrid -> { assertEquals(0, geoHashGrid.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(geoHashGrid)); }); } @@ -94,6 +97,7 @@ public void testWithSeveralDocs() throws IOException { for (GeoHashGrid.Bucket bucket : geoHashGrid.getBuckets()) { assertEquals((long) expectedCountPerGeoHash.get(bucket.getKeyAsString()), bucket.getDocCount()); } + assertTrue(AggregationInspectionHelper.hasValue(geoHashGrid)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java index 2d5109405dc1c..9293b33e22f43 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java @@ -33,18 +33,22 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.MultiBucketConsumerService; -import org.elasticsearch.search.aggregations.metrics.Stats; +import org.elasticsearch.search.aggregations.metrics.InternalStats; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.hamcrest.Matchers; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import org.junit.Assert; import java.io.IOException; +import java.time.LocalDate; +import java.time.YearMonth; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -58,35 +62,44 @@ public class AutoDateHistogramAggregatorTests extends AggregatorTestCase { private static final String DATE_FIELD = "date"; private static final String INSTANT_FIELD = "instant"; - private static final List DATES_WITH_TIME = Arrays.asList( - new DateTime(2010, 3, 12, 1, 7, 45, DateTimeZone.UTC), - new DateTime(2010, 4, 27, 3, 43, 34, DateTimeZone.UTC), - new DateTime(2012, 5, 18, 4, 11, 0, DateTimeZone.UTC), - new DateTime(2013, 5, 29, 5, 11, 31, DateTimeZone.UTC), - new DateTime(2013, 10, 31, 8, 24, 5, DateTimeZone.UTC), - new DateTime(2015, 2, 13, 13, 9, 32, DateTimeZone.UTC), - new DateTime(2015, 6, 24, 13, 47, 43, DateTimeZone.UTC), - new DateTime(2015, 11, 13, 16, 14, 34, DateTimeZone.UTC), - new DateTime(2016, 3, 4, 17, 9, 50, DateTimeZone.UTC), - new DateTime(2017, 12, 12, 22, 55, 46, DateTimeZone.UTC)); + private static final List DATES_WITH_TIME = Arrays.asList( + ZonedDateTime.of(2010, 3, 12, 1, 7, 45, 0, ZoneOffset.UTC), + ZonedDateTime.of(2010, 4, 27, 3, 43, 34, 0, ZoneOffset.UTC), + ZonedDateTime.of(2012, 5, 18, 4, 11, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 5, 29, 5, 11, 31, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 10, 31, 8, 24, 5, 0, ZoneOffset.UTC), + ZonedDateTime.of(2015, 2, 13, 13, 9, 32, 0, ZoneOffset.UTC), + ZonedDateTime.of(2015, 6, 24, 13, 47, 43, 0, ZoneOffset.UTC), + ZonedDateTime.of(2015, 11, 13, 16, 14, 34, 0, ZoneOffset.UTC), + ZonedDateTime.of(2016, 3, 4, 17, 9, 50, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 12, 12, 22, 55, 46, 0, ZoneOffset.UTC)); private static final Query DEFAULT_QUERY = new MatchAllDocsQuery(); public void testMatchNoDocs() throws IOException { testBothCases(new MatchNoDocsQuery(), DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); } public void testMatchAllDocs() throws IOException { testSearchCase(DEFAULT_QUERY, DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), - histogram -> assertEquals(10, histogram.getBuckets().size()) + histogram -> { + assertEquals(10, histogram.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } ); testSearchAndReduceCase(DEFAULT_QUERY, DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), - histogram -> assertEquals(8, histogram.getBuckets().size()) + histogram -> { + assertEquals(8, histogram.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } ); } @@ -95,16 +108,18 @@ public void testSubAggregations() throws IOException { aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD) .subAggregation(AggregationBuilders.stats("stats").field(DATE_FIELD)), histogram -> { + assertTrue(AggregationInspectionHelper.hasValue(histogram)); final List buckets = histogram.getBuckets(); assertEquals(8, buckets.size()); Histogram.Bucket bucket = buckets.get(0); assertEquals("2010-01-01T00:00:00.000Z", bucket.getKeyAsString()); assertEquals(2, bucket.getDocCount()); - Stats stats = bucket.getAggregations().get("stats"); + InternalStats stats = bucket.getAggregations().get("stats"); assertEquals("2010-03-12T01:07:45.000Z", stats.getMinAsString()); assertEquals("2010-04-27T03:43:34.000Z", stats.getMaxAsString()); assertEquals(2L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(1); assertEquals("2011-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -113,6 +128,7 @@ public void testSubAggregations() throws IOException { assertTrue(Double.isInfinite(stats.getMin())); assertTrue(Double.isInfinite(stats.getMax())); assertEquals(0L, stats.getCount()); + assertFalse(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(2); assertEquals("2012-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -121,6 +137,7 @@ public void testSubAggregations() throws IOException { assertEquals("2012-05-18T04:11:00.000Z", stats.getMinAsString()); assertEquals("2012-05-18T04:11:00.000Z", stats.getMaxAsString()); assertEquals(1L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(3); assertEquals("2013-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -129,6 +146,7 @@ public void testSubAggregations() throws IOException { assertEquals("2013-05-29T05:11:31.000Z", stats.getMinAsString()); assertEquals("2013-10-31T08:24:05.000Z", stats.getMaxAsString()); assertEquals(2L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(4); assertEquals("2014-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -137,6 +155,7 @@ public void testSubAggregations() throws IOException { assertTrue(Double.isInfinite(stats.getMin())); assertTrue(Double.isInfinite(stats.getMax())); assertEquals(0L, stats.getCount()); + assertFalse(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(5); assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -145,6 +164,7 @@ public void testSubAggregations() throws IOException { assertEquals("2015-02-13T13:09:32.000Z", stats.getMinAsString()); assertEquals("2015-11-13T16:14:34.000Z", stats.getMaxAsString()); assertEquals(3L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(6); assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -153,6 +173,7 @@ public void testSubAggregations() throws IOException { assertEquals("2016-03-04T17:09:50.000Z", stats.getMinAsString()); assertEquals("2016-03-04T17:09:50.000Z", stats.getMaxAsString()); assertEquals(1L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(7); assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -161,15 +182,19 @@ public void testSubAggregations() throws IOException { assertEquals("2017-12-12T22:55:46.000Z", stats.getMinAsString()); assertEquals("2017-12-12T22:55:46.000Z", stats.getMaxAsString()); assertEquals(1L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); }); } public void testNoDocs() throws IOException { - final List dates = Collections.emptyList(); + final List dates = Collections.emptyList(); final Consumer aggregation = agg -> agg.setNumBuckets(10).field(DATE_FIELD); testSearchCase(DEFAULT_QUERY, dates, aggregation, - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); testSearchAndReduceCase(DEFAULT_QUERY, dates, aggregation, Assert::assertNull @@ -179,13 +204,18 @@ public void testNoDocs() throws IOException { public void testAggregateWrongField() throws IOException { testBothCases(DEFAULT_QUERY, DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(10).field("wrong_field"), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); } public void testIntervalYear() throws IOException { - final long start = new DateTime(DateTimeZone.UTC).withDate(2015, 1, 1).getMillis(); - final long end = new DateTime(DateTimeZone.UTC).withDate(2017, 12, 31).getMillis(); + + + final long start = LocalDate.of(2015, 1, 1).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli(); + final long end = LocalDate.of(2017, 12, 31).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli(); final Query rangeQuery = LongPoint.newRangeQuery(INSTANT_FIELD, start, end); testSearchCase(rangeQuery, DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), @@ -197,13 +227,14 @@ public void testIntervalYear() throws IOException { assertEquals(DATES_WITH_TIME.get(5 + i), bucket.getKey()); assertEquals(1, bucket.getDocCount()); } + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } ); testSearchAndReduceCase(rangeQuery, DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), histogram -> { - final DateTime startDate = new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC); - final Map expectedDocCount = new HashMap<>(); + final ZonedDateTime startDate = ZonedDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + final Map expectedDocCount = new HashMap<>(); expectedDocCount.put(startDate, 3); expectedDocCount.put(startDate.plusYears(1), 1); expectedDocCount.put(startDate.plusYears(2), 1); @@ -211,18 +242,19 @@ public void testIntervalYear() throws IOException { assertEquals(expectedDocCount.size(), buckets.size()); buckets.forEach(bucket -> assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } ); } public void testIntervalMonth() throws IOException { - final List datesForMonthInterval = Arrays.asList( - new DateTime(2017, 1, 1, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 3, 4, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 3, 5, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 3, 6, 0, 0, 0, DateTimeZone.UTC)); + final List datesForMonthInterval = Arrays.asList( + ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 2, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 3, 4, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 3, 5, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 3, 6, 0, 0, 0, 0, ZoneOffset.UTC)); testSearchCase(DEFAULT_QUERY, datesForMonthInterval, aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), histogram -> { final List buckets = histogram.getBuckets(); @@ -236,7 +268,7 @@ public void testIntervalMonth() throws IOException { testSearchAndReduceCase(DEFAULT_QUERY, datesForMonthInterval, aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), histogram -> { - final Map expectedDocCount = new HashMap<>(); + final Map expectedDocCount = new HashMap<>(); expectedDocCount.put(datesForMonthInterval.get(0).withDayOfMonth(1), 1); expectedDocCount.put(datesForMonthInterval.get(1).withDayOfMonth(1), 2); expectedDocCount.put(datesForMonthInterval.get(3).withDayOfMonth(1), 3); @@ -244,6 +276,7 @@ public void testIntervalMonth() throws IOException { assertEquals(expectedDocCount.size(), buckets.size()); buckets.forEach(bucket -> assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } ); } @@ -259,15 +292,15 @@ public void testWithLargeNumberOfBuckets() { } public void testIntervalDay() throws IOException { - final List datesForDayInterval = Arrays.asList( - new DateTime(2017, 2, 1, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 5, 0, 0, 0, DateTimeZone.UTC)); - final Map expectedDocCount = new HashMap<>(); + final List datesForDayInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 2, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 2, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 5, 0, 0, 0, 0, ZoneOffset.UTC)); + final Map expectedDocCount = new HashMap<>(); expectedDocCount.put(datesForDayInterval.get(0), 1); expectedDocCount.put(datesForDayInterval.get(1), 2); expectedDocCount.put(datesForDayInterval.get(3), 3); @@ -287,21 +320,22 @@ public void testIntervalDay() throws IOException { assertEquals(5, buckets.size()); buckets.forEach(bucket -> assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } ); } public void testIntervalDayWithTZ() throws IOException { - final List datesForDayInterval = Arrays.asList( - new DateTime(2017, 2, 1, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 5, 0, 0, 0, DateTimeZone.UTC)); + final List datesForDayInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 2, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 2, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 5, 0, 0, 0, 0, ZoneOffset.UTC)); testSearchCase(DEFAULT_QUERY, datesForDayInterval, - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { + aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(ZoneOffset.ofHours(-1)), histogram -> { final Map expectedDocCount = new HashMap<>(); expectedDocCount.put("2017-01-31T23:00:00.000-01:00", 1); expectedDocCount.put("2017-02-01T23:00:00.000-01:00", 2); @@ -311,9 +345,10 @@ public void testIntervalDayWithTZ() throws IOException { assertEquals(expectedDocCount.size(), buckets.size()); buckets.forEach(bucket -> assertEquals(expectedDocCount.getOrDefault(bucket.getKeyAsString(), 0).longValue(), bucket.getDocCount())); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); }); testSearchAndReduceCase(DEFAULT_QUERY, datesForDayInterval, - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { + aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(ZoneOffset.ofHours(-1)), histogram -> { final Map expectedDocCount = new HashMap<>(); expectedDocCount.put("2017-01-31T00:00:00.000-01:00", 1); expectedDocCount.put("2017-02-01T00:00:00.000-01:00", 2); @@ -323,21 +358,22 @@ public void testIntervalDayWithTZ() throws IOException { assertEquals(5, buckets.size()); buckets.forEach(bucket -> assertEquals(expectedDocCount.getOrDefault(bucket.getKeyAsString(), 0).longValue(), bucket.getDocCount())); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); }); } public void testIntervalHour() throws IOException { - final List datesForHourInterval = Arrays.asList( - new DateTime(2017, 2, 1, 9, 2, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 35, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 10, 15, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 13, 6, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 14, 4, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 14, 5, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 15, 59, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 6, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 48, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 59, 0, DateTimeZone.UTC)); + final List datesForHourInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 9, 2, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 35, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 10, 15, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 13, 6, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 14, 4, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 14, 5, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 15, 59, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 6, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 48, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 59, 0, 0, ZoneOffset.UTC)); testSearchCase(DEFAULT_QUERY, datesForHourInterval, aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), histogram -> { @@ -353,13 +389,13 @@ public void testIntervalHour() throws IOException { testSearchAndReduceCase(DEFAULT_QUERY, datesForHourInterval, aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), histogram -> { - final Map expectedDocCount = new HashMap<>(); - expectedDocCount.put(datesForHourInterval.get(0).withMinuteOfHour(0), 2); - expectedDocCount.put(datesForHourInterval.get(2).withMinuteOfHour(0), 1); - expectedDocCount.put(datesForHourInterval.get(3).withMinuteOfHour(0), 1); - expectedDocCount.put(datesForHourInterval.get(4).withMinuteOfHour(0), 2); - expectedDocCount.put(datesForHourInterval.get(6).withMinuteOfHour(0), 1); - expectedDocCount.put(datesForHourInterval.get(7).withMinuteOfHour(0), 3); + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForHourInterval.get(0).withMinute(0), 2); + expectedDocCount.put(datesForHourInterval.get(2).withMinute(0), 1); + expectedDocCount.put(datesForHourInterval.get(3).withMinute(0), 1); + expectedDocCount.put(datesForHourInterval.get(4).withMinute(0), 2); + expectedDocCount.put(datesForHourInterval.get(6).withMinute(0), 1); + expectedDocCount.put(datesForHourInterval.get(7).withMinute(0), 3); final List buckets = histogram.getBuckets(); assertEquals(8, buckets.size()); buckets.forEach(bucket -> @@ -369,10 +405,10 @@ public void testIntervalHour() throws IOException { testSearchAndReduceCase(DEFAULT_QUERY, datesForHourInterval, aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), histogram -> { - final Map expectedDocCount = new HashMap<>(); - expectedDocCount.put(datesForHourInterval.get(0).withMinuteOfHour(0), 3); - expectedDocCount.put(datesForHourInterval.get(0).plusHours(3).withMinuteOfHour(0), 3); - expectedDocCount.put(datesForHourInterval.get(0).plusHours(6).withMinuteOfHour(0), 4); + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForHourInterval.get(0).withMinute(0), 3); + expectedDocCount.put(datesForHourInterval.get(0).plusHours(3).withMinute(0), 3); + expectedDocCount.put(datesForHourInterval.get(0).plusHours(6).withMinute(0), 4); final List buckets = histogram.getBuckets(); assertEquals(expectedDocCount.size(), buckets.size()); buckets.forEach(bucket -> @@ -382,22 +418,23 @@ public void testIntervalHour() throws IOException { } public void testIntervalHourWithTZ() throws IOException { - final List datesForHourInterval = Arrays.asList( - new DateTime(2017, 2, 1, 9, 2, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 35, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 10, 15, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 13, 6, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 14, 4, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 14, 5, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 15, 59, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 6, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 48, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 59, 0, DateTimeZone.UTC)); + final List datesForHourInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 9, 2, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 35, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 10, 15, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 13, 6, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 14, 4, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 14, 5, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 15, 59, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 6, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 48, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 59, 0, 0, ZoneOffset.UTC)); testSearchCase(DEFAULT_QUERY, datesForHourInterval, - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), + aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD).timeZone(ZoneOffset.ofHours(-1)), histogram -> { final List dateStrings = datesForHourInterval.stream() - .map(dateTime -> dateTime.withZone(DateTimeZone.forOffsetHours(-1)).toString()).collect(Collectors.toList()); + .map(dateTime -> DateFormatter.forPattern("strict_date_time") + .format(dateTime.withZoneSameInstant(ZoneOffset.ofHours(-1)))).collect(Collectors.toList()); final List buckets = histogram.getBuckets(); assertEquals(datesForHourInterval.size(), buckets.size()); for (int i = 0; i < buckets.size(); i++) { @@ -408,7 +445,7 @@ public void testIntervalHourWithTZ() throws IOException { } ); testSearchAndReduceCase(DEFAULT_QUERY, datesForHourInterval, - aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), + aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD).timeZone(ZoneOffset.ofHours(-1)), histogram -> { final Map expectedDocCount = new HashMap<>(); expectedDocCount.put("2017-02-01T08:00:00.000-01:00", 2); @@ -427,10 +464,10 @@ public void testIntervalHourWithTZ() throws IOException { public void testRandomSecondIntervals() throws IOException { final int length = 120; - final List dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, 0, DateTimeZone.UTC); + final List dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusSeconds(i); + final ZonedDateTime date = startDate.plusSeconds(i); dataset.add(date); } final Map bucketsToExpectedDocCountMap = new HashMap<>(); @@ -456,10 +493,10 @@ public void testRandomSecondIntervals() throws IOException { public void testRandomMinuteIntervals() throws IOException { final int length = 120; - final List dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + final List dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusMinutes(i); + final ZonedDateTime date = startDate.plusMinutes(i); dataset.add(date); } final Map bucketsToExpectedDocCountMap = new HashMap<>(); @@ -485,10 +522,10 @@ public void testRandomMinuteIntervals() throws IOException { public void testRandomHourIntervals() throws IOException { final int length = 72; - final List dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + final List dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusHours(i); + final ZonedDateTime date = startDate.plusHours(i); dataset.add(date); } final Map bucketsToExpectedDocCountMap = new HashMap<>(); @@ -513,10 +550,10 @@ public void testRandomHourIntervals() throws IOException { public void testRandomDayIntervals() throws IOException { final int length = 140; - final List dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + final List dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusDays(i); + final ZonedDateTime date = startDate.plusDays(i); dataset.add(date); } final int randomChoice = randomIntBetween(1, 3); @@ -552,17 +589,17 @@ public void testRandomDayIntervals() throws IOException { final int randomIndex = randomInt(2); final Histogram.Bucket bucket = buckets.get(randomIndex); assertEquals(startDate.plusMonths(randomIndex), bucket.getKey()); - assertEquals(startDate.plusMonths(randomIndex).dayOfMonth().getMaximumValue(), bucket.getDocCount()); + assertEquals(YearMonth.from(startDate.plusMonths(randomIndex)).lengthOfMonth(), bucket.getDocCount()); }); } } public void testRandomMonthIntervals() throws IOException { final int length = 60; - final List dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + final List dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusMonths(i); + final ZonedDateTime date = startDate.plusMonths(i); dataset.add(date); } final Map bucketsToExpectedDocCountMap = new HashMap<>(); @@ -586,10 +623,10 @@ public void testRandomMonthIntervals() throws IOException { public void testRandomYearIntervals() throws IOException { final int length = 300; - final List dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + final List dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusYears(i); + final ZonedDateTime date = startDate.plusYears(i); dataset.add(date); } final Map bucketsToExpectedDocCountMap = new HashMap<>(); @@ -615,12 +652,12 @@ public void testRandomYearIntervals() throws IOException { } public void testIntervalMinute() throws IOException { - final List datesForMinuteInterval = Arrays.asList( - new DateTime(2017, 2, 1, 9, 2, 35, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 2, 59, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 15, 37, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 16, 4, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 16, 42, DateTimeZone.UTC)); + final List datesForMinuteInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 9, 2, 35, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 2, 59, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 15, 37, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 16, 4, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 16, 42, 0, ZoneOffset.UTC)); testSearchCase(DEFAULT_QUERY, datesForMinuteInterval, aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), @@ -637,10 +674,10 @@ public void testIntervalMinute() throws IOException { testSearchAndReduceCase(DEFAULT_QUERY, datesForMinuteInterval, aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), histogram -> { - final Map expectedDocCount = new HashMap<>(); - expectedDocCount.put(datesForMinuteInterval.get(0).withSecondOfMinute(0), 2); - expectedDocCount.put(datesForMinuteInterval.get(2).withSecondOfMinute(0), 1); - expectedDocCount.put(datesForMinuteInterval.get(3).withSecondOfMinute(0), 2); + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForMinuteInterval.get(0).withSecond(0), 2); + expectedDocCount.put(datesForMinuteInterval.get(2).withSecond(0), 1); + expectedDocCount.put(datesForMinuteInterval.get(3).withSecond(0), 2); final List buckets = histogram.getBuckets(); assertEquals(15, buckets.size()); buckets.forEach(bucket -> @@ -650,15 +687,15 @@ public void testIntervalMinute() throws IOException { } public void testIntervalSecond() throws IOException { - final List datesForSecondInterval = Arrays.asList( - new DateTime(2017, 2, 1, 0, 0, 5, 15, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 0, 0, 7, 299, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 0, 0, 7, 74, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 0, 0, 11, 688, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 0, 0, 11, 210, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 0, 0, 11, 380, DateTimeZone.UTC)); - final DateTime startDate = datesForSecondInterval.get(0).withMillisOfSecond(0); - final Map expectedDocCount = new HashMap<>(); + final List datesForSecondInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 0, 0, 5, 15, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 0, 0, 7, 299, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 0, 0, 7, 74, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 0, 0, 11, 688, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 0, 0, 11, 210, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 0, 0, 11, 380, ZoneOffset.UTC)); + final ZonedDateTime startDate = datesForSecondInterval.get(0).withNano(0); + final Map expectedDocCount = new HashMap<>(); expectedDocCount.put(startDate, 1); expectedDocCount.put(startDate.plusSeconds(2), 2); expectedDocCount.put(startDate.plusSeconds(6), 3); @@ -681,21 +718,21 @@ public void testIntervalSecond() throws IOException { ); } - private void testSearchCase(final Query query, final List dataset, + private void testSearchCase(final Query query, final List dataset, final Consumer configure, - final Consumer verify) throws IOException { + final Consumer verify) throws IOException { executeTestCase(false, query, dataset, configure, verify); } - private void testSearchAndReduceCase(final Query query, final List dataset, + private void testSearchAndReduceCase(final Query query, final List dataset, final Consumer configure, - final Consumer verify) throws IOException { + final Consumer verify) throws IOException { executeTestCase(true, query, dataset, configure, verify); } - private void testBothCases(final Query query, final List dataset, + private void testBothCases(final Query query, final List dataset, final Consumer configure, - final Consumer verify) throws IOException { + final Consumer verify) throws IOException { executeTestCase(false, query, dataset, configure, verify); executeTestCase(true, query, dataset, configure, verify); } @@ -714,18 +751,18 @@ protected IndexSettings createIndexSettings() { ); } - private void executeTestCase(final boolean reduced, final Query query, final List dataset, + private void executeTestCase(final boolean reduced, final Query query, final List dataset, final Consumer configure, - final Consumer verify) throws IOException { + final Consumer verify) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { final Document document = new Document(); - for (final DateTime date : dataset) { + for (final ZonedDateTime date : dataset) { if (frequently()) { indexWriter.commit(); } - final long instant = date.getMillis(); + final long instant = date.toInstant().toEpochMilli(); document.add(new SortedNumericDocValuesField(DATE_FIELD, instant)); document.add(new LongPoint(INSTANT_FIELD, instant)); indexWriter.addDocument(document); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 4e6004444a63e..2fbf60a3ddccb 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -30,10 +30,12 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.MultiBucketConsumerService.TooManyBucketsException; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; @@ -61,7 +63,10 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { public void testMatchNoDocs() throws IOException { testBothCases(new MatchNoDocsQuery(), dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); } @@ -70,15 +75,24 @@ public void testMatchAllDocs() throws IOException { testSearchCase(query, dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), - histogram -> assertEquals(6, histogram.getBuckets().size()) + histogram -> { + assertEquals(6, histogram.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } ); testSearchAndReduceCase(query, dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), - histogram -> assertEquals(8, histogram.getBuckets().size()) + histogram -> { + assertEquals(8, histogram.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } ); testBothCases(query, dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD).minDocCount(1L), - histogram -> assertEquals(6, histogram.getBuckets().size()) + histogram -> { + assertEquals(6, histogram.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } ); } @@ -89,7 +103,10 @@ public void testNoDocs() throws IOException { agg.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD); testSearchCase(query, dates, aggregation, - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); testSearchAndReduceCase(query, dates, aggregation, histogram -> assertNull(histogram) @@ -99,7 +116,10 @@ public void testNoDocs() throws IOException { public void testAggregateWrongField() throws IOException { testBothCases(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field("wrong_field"), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); } @@ -371,39 +391,39 @@ public void testMaxBucket() throws IOException { private void testSearchCase(Query query, List dataset, Consumer configure, - Consumer verify) throws IOException { + Consumer verify) throws IOException { testSearchCase(query, dataset, configure, verify, 10000); } private void testSearchCase(Query query, List dataset, Consumer configure, - Consumer verify, + Consumer verify, int maxBucket) throws IOException { executeTestCase(false, query, dataset, configure, verify, maxBucket); } private void testSearchAndReduceCase(Query query, List dataset, Consumer configure, - Consumer verify) throws IOException { + Consumer verify) throws IOException { testSearchAndReduceCase(query, dataset, configure, verify, 1000); } private void testSearchAndReduceCase(Query query, List dataset, Consumer configure, - Consumer verify, + Consumer verify, int maxBucket) throws IOException { executeTestCase(true, query, dataset, configure, verify, maxBucket); } private void testBothCases(Query query, List dataset, Consumer configure, - Consumer verify) throws IOException { + Consumer verify) throws IOException { testBothCases(query, dataset, configure, verify, 10000); } private void testBothCases(Query query, List dataset, Consumer configure, - Consumer verify, + Consumer verify, int maxBucket) throws IOException { testSearchCase(query, dataset, configure, verify, maxBucket); testSearchAndReduceCase(query, dataset, configure, verify, maxBucket); @@ -411,7 +431,7 @@ private void testBothCases(Query query, List dataset, private void executeTestCase(boolean reduced, Query query, List dataset, Consumer configure, - Consumer verify, + Consumer verify, int maxBucket) throws IOException { try (Directory directory = newDirectory()) { @@ -455,6 +475,6 @@ private void executeTestCase(boolean reduced, Query query, List dataset, } private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis(); + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java index 5148b0b85754f..c65b21ef72d32 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java @@ -31,9 +31,10 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.BucketOrder; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.List; @@ -137,7 +138,7 @@ private static Document documentForDate(String field, long millis) { } public void testRewriteTimeZone() throws IOException { - DateFormatter format = DateFormatters.forPattern("strict_date_optional_time"); + DateFormatter format = DateFormatter.forPattern("strict_date_optional_time"); try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) { @@ -166,15 +167,15 @@ public void testRewriteTimeZone() throws IOException { assertNull(builder.rewriteTimeZone(shardContextThatCrosses)); // fixed timeZone => no rewrite - DateTimeZone tz = DateTimeZone.forOffsetHours(1); + ZoneId tz = ZoneOffset.ofHours(1); builder.timeZone(tz); assertSame(tz, builder.rewriteTimeZone(shardContextThatDoesntCross)); assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); // daylight-saving-times => rewrite if doesn't cross - tz = DateTimeZone.forID("Europe/Paris"); + tz = ZoneId.of("Europe/Paris"); builder.timeZone(tz); - assertEquals(DateTimeZone.forOffsetHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross)); + assertEquals(ZoneOffset.ofHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross)); assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); // Rounded values are no longer all within the same transitions => no rewrite @@ -187,7 +188,7 @@ public void testRewriteTimeZone() throws IOException { builder.timeZone(tz); builder.interval(1000L * 60 * 60 * 24); // ~ 1 day - assertEquals(DateTimeZone.forOffsetHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross)); + assertEquals(ZoneOffset.ofHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross)); assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); // Because the interval is large, rounded values are not diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java index 486f78778c452..0980bb7cf97ec 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java @@ -36,13 +36,9 @@ import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; -import org.joda.time.Instant; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; import java.io.IOException; -import java.util.Locale; +import java.time.ZoneOffset; import static java.lang.Math.max; import static java.lang.Math.min; @@ -66,17 +62,19 @@ public static ExtendedBounds randomExtendedBounds() { * Construct a random {@link ExtendedBounds} in pre-parsed form. */ public static ExtendedBounds randomParsedExtendedBounds() { + long maxDateValue = 253402300799999L; // end of year 9999 + long minDateValue = -377705116800000L; // beginning of year -9999 if (randomBoolean()) { // Construct with one missing bound if (randomBoolean()) { - return new ExtendedBounds(null, randomLong()); + return new ExtendedBounds(null, maxDateValue); } - return new ExtendedBounds(randomLong(), null); + return new ExtendedBounds(minDateValue, null); } - long a = randomLong(); + long a = randomLongBetween(minDateValue, maxDateValue); long b; do { - b = randomLong(); + b = randomLongBetween(minDateValue, maxDateValue); } while (a == b); long min = min(a, b); long max = max(a, b); @@ -88,9 +86,9 @@ public static ExtendedBounds randomParsedExtendedBounds() { */ public static ExtendedBounds unparsed(ExtendedBounds template) { // It'd probably be better to randomize the formatter - DateTimeFormatter formatter = ISODateTimeFormat.dateTime().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC); - String minAsStr = template.getMin() == null ? null : formatter.print(new Instant(template.getMin())); - String maxAsStr = template.getMax() == null ? null : formatter.print(new Instant(template.getMax())); + DateFormatter formatter = DateFormatter.forPattern("strict_date_time").withZone(ZoneOffset.UTC); + String minAsStr = template.getMin() == null ? null : formatter.formatMillis(template.getMin()); + String maxAsStr = template.getMax() == null ? null : formatter.formatMillis(template.getMax()); return new ExtendedBounds(minAsStr, maxAsStr); } @@ -104,7 +102,7 @@ public void testParseAndValidate() { null, xContentRegistry(), writableRegistry(), null, null, () -> now, null); when(context.getQueryShardContext()).thenReturn(qsc); DateFormatter formatter = DateFormatter.forPattern("dateOptionalTime"); - DocValueFormat format = new DocValueFormat.DateTime(formatter, DateTimeZone.UTC); + DocValueFormat format = new DocValueFormat.DateTime(formatter, ZoneOffset.UTC); ExtendedBounds expected = randomParsedExtendedBounds(); ExtendedBounds parsed = unparsed(expected).parseAndValidate("test", context, format); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java index 81ffd1c78a3f7..bdb99a4971ac1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; public class HistogramAggregatorTests extends AggregatorTestCase { @@ -49,7 +50,7 @@ public void testLongs() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(4, histogram.getBuckets().size()); assertEquals(-10d, histogram.getBuckets().get(0).getKey()); assertEquals(2, histogram.getBuckets().get(0).getDocCount()); @@ -59,6 +60,7 @@ public void testLongs() throws Exception { assertEquals(2, histogram.getBuckets().get(2).getDocCount()); assertEquals(50d, histogram.getBuckets().get(3).getKey()); assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -79,7 +81,7 @@ public void testDoubles() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(4, histogram.getBuckets().size()); assertEquals(-10d, histogram.getBuckets().get(0).getKey()); assertEquals(2, histogram.getBuckets().get(0).getDocCount()); @@ -89,6 +91,7 @@ public void testDoubles() throws Exception { assertEquals(2, histogram.getBuckets().get(2).getDocCount()); assertEquals(50d, histogram.getBuckets().get(3).getKey()); assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -109,7 +112,7 @@ public void testIrrationalInterval() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(4, histogram.getBuckets().size()); assertEquals(-4 * Math.PI, histogram.getBuckets().get(0).getKey()); assertEquals(1, histogram.getBuckets().get(0).getDocCount()); @@ -119,6 +122,7 @@ public void testIrrationalInterval() throws Exception { assertEquals(2, histogram.getBuckets().get(2).getDocCount()); assertEquals(Math.PI, histogram.getBuckets().get(3).getKey()); assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -140,12 +144,13 @@ public void testMinDocCount() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(2, histogram.getBuckets().size()); assertEquals(-10d, histogram.getBuckets().get(0).getKey()); assertEquals(2, histogram.getBuckets().get(0).getDocCount()); assertEquals(0d, histogram.getBuckets().get(1).getKey()); assertEquals(3, histogram.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -168,7 +173,7 @@ public void testMissing() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(4, histogram.getBuckets().size()); assertEquals(-10d, histogram.getBuckets().get(0).getKey()); assertEquals(2, histogram.getBuckets().get(0).getDocCount()); @@ -178,6 +183,7 @@ public void testMissing() throws Exception { assertEquals(2, histogram.getBuckets().get(2).getDocCount()); assertEquals(50d, histogram.getBuckets().get(3).getKey()); assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -199,7 +205,7 @@ public void testOffset() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(3, histogram.getBuckets().size()); assertEquals(-10 + Math.PI, histogram.getBuckets().get(0).getKey()); assertEquals(2, histogram.getBuckets().get(0).getDocCount()); @@ -207,6 +213,7 @@ public void testOffset() throws Exception { assertEquals(2, histogram.getBuckets().get(1).getDocCount()); assertEquals(5 + Math.PI, histogram.getBuckets().get(2).getKey()); assertEquals(1, histogram.getBuckets().get(2).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -228,7 +235,7 @@ public void testExtendedBounds() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(6, histogram.getBuckets().size()); assertEquals(-15d, histogram.getBuckets().get(0).getKey()); assertEquals(0, histogram.getBuckets().get(0).getDocCount()); @@ -242,6 +249,7 @@ public void testExtendedBounds() throws Exception { assertEquals(0, histogram.getBuckets().get(4).getDocCount()); assertEquals(10d, histogram.getBuckets().get(5).getKey()); assertEquals(0, histogram.getBuckets().get(5).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java index dd3425c20f43c..fe5c967f54be8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; @@ -28,12 +28,12 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram.BucketInfo; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.time.Instant; import java.time.OffsetDateTime; +import java.time.ZoneId; import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -89,16 +89,16 @@ protected InternalAutoDateHistogram createTestInstance(String name, */ public void testGetAppropriateRoundingUsesCorrectIntervals() { RoundingInfo[] roundings = new RoundingInfo[6]; - DateTimeZone timeZone = DateTimeZone.UTC; + ZoneId timeZone = ZoneOffset.UTC; // Since we pass 0 as the starting index to getAppropriateRounding, we'll also use // an innerInterval that is quite large, such that targetBuckets * roundings[i].getMaximumInnerInterval() // will be larger than the estimate. - roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE, timeZone), - 1000L, "s", 1000); - roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR, timeZone), - 60 * 1000L, "m", 1, 5, 10, 30); - roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY, timeZone), - 60 * 60 * 1000L, "h", 1, 3, 12); + roundings[0] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.SECOND_OF_MINUTE, timeZone), + 1000L, "s",1000); + roundings[1] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.MINUTES_OF_HOUR, timeZone), + 60 * 1000L, "m",1, 5, 10, 30); + roundings[2] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.HOUR_OF_DAY, timeZone), + 60 * 60 * 1000L, "h",1, 3, 12); OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC); // We want to pass a roundingIdx of zero, because in order to reproduce this bug, we need the function @@ -117,7 +117,7 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List= keyForBucket && roundedBucketKey < keyForBucket + intervalInMillis) { @@ -194,7 +194,7 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List actualCounts = new TreeMap<>(); for (Histogram.Bucket bucket : reduced.getBuckets()) { - actualCounts.compute(((DateTime) bucket.getKey()).getMillis(), + actualCounts.compute(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli(), (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount()); } assertEquals(expectedCounts, actualCounts); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java index dd5d06f8785f7..961a05a7c40fd 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.BucketOrder; @@ -28,8 +28,8 @@ import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; -import org.joda.time.DateTime; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -104,7 +104,7 @@ protected void assertReduced(InternalDateHistogram reduced, List expectedCounts = new TreeMap<>(); for (Histogram histogram : inputs) { for (Histogram.Bucket bucket : histogram.getBuckets()) { - expectedCounts.compute(((DateTime) bucket.getKey()).getMillis(), + expectedCounts.compute(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli(), (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount()); } } @@ -139,7 +139,7 @@ protected void assertReduced(InternalDateHistogram reduced, List actualCounts = new TreeMap<>(); for (Histogram.Bucket bucket : reduced.getBuckets()) { - actualCounts.compute(((DateTime) bucket.getKey()).getMillis(), + actualCounts.compute(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli(), (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount()); } assertEquals(expectedCounts, actualCounts); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java index 424c3aed2105d..daaeb94d8fae9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; @@ -45,7 +46,10 @@ public void testMatchNoDocs() throws IOException { "field", Queries.newMatchAllQuery(), doc -> doc.add(new SortedNumericDocValuesField("field", randomLong())), - internalMissing -> assertEquals(internalMissing.getDocCount(), 0)); + internalMissing -> { + assertEquals(internalMissing.getDocCount(), 0); + assertFalse(AggregationInspectionHelper.hasValue(internalMissing)); + }); } public void testMatchAllDocs() throws IOException { @@ -54,7 +58,10 @@ public void testMatchAllDocs() throws IOException { "field", Queries.newMatchAllQuery(), doc -> doc.add(new SortedNumericDocValuesField("another_field", randomLong())), - internalMissing -> assertEquals(internalMissing.getDocCount(), numDocs)); + internalMissing -> { + assertEquals(internalMissing.getDocCount(), numDocs); + assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); + }); } public void testMatchSparse() throws IOException { @@ -74,6 +81,7 @@ public void testMatchSparse() throws IOException { internalMissing -> { assertEquals(internalMissing.getDocCount(), count.get()); count.set(0); + assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); }); } @@ -87,6 +95,7 @@ public void testMissingField() throws IOException { }, internalMissing -> { assertEquals(internalMissing.getDocCount(), numDocs); + assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index e9911a7034c2f..1eef8de86b304 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalSum; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.test.VersionUtils; @@ -115,16 +116,16 @@ public void testNoDocs() throws IOException { NumberFieldMapper.NumberType.LONG); fieldType.setName(VALUE_FIELD_NAME); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), nestedBuilder, fieldType); assertEquals(NESTED_AGG, nested.getName()); assertEquals(0, nested.getDocCount()); - InternalMax max = (InternalMax) - ((InternalAggregation)nested).getProperty(MAX_AGG_NAME); + InternalMax max = (InternalMax) nested.getProperty(MAX_AGG_NAME); assertEquals(MAX_AGG_NAME, max.getName()); assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), Double.MIN_VALUE); + assertFalse(AggregationInspectionHelper.hasValue(nested)); } } } @@ -162,17 +163,18 @@ public void testSingleNestingMax() throws IOException { NumberFieldMapper.NumberType.LONG); fieldType.setName(VALUE_FIELD_NAME); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), nestedBuilder, fieldType); assertEquals(expectedNestedDocs, nested.getDocCount()); assertEquals(NESTED_AGG, nested.getName()); assertEquals(expectedNestedDocs, nested.getDocCount()); - InternalMax max = (InternalMax) - ((InternalAggregation)nested).getProperty(MAX_AGG_NAME); + InternalMax max = (InternalMax) nested.getProperty(MAX_AGG_NAME); assertEquals(MAX_AGG_NAME, max.getName()); assertEquals(expectedMaxValue, max.getValue(), Double.MIN_VALUE); + + assertTrue(AggregationInspectionHelper.hasValue(nested)); } } } @@ -211,17 +213,18 @@ public void testDoubleNestingMax() throws IOException { NumberFieldMapper.NumberType.LONG); fieldType.setName(VALUE_FIELD_NAME); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), nestedBuilder, fieldType); assertEquals(expectedNestedDocs, nested.getDocCount()); assertEquals(NESTED_AGG, nested.getName()); assertEquals(expectedNestedDocs, nested.getDocCount()); - InternalMax max = (InternalMax) - ((InternalAggregation)nested).getProperty(MAX_AGG_NAME); + InternalMax max = (InternalMax) nested.getProperty(MAX_AGG_NAME); assertEquals(MAX_AGG_NAME, max.getName()); assertEquals(expectedMaxValue, max.getValue(), Double.MIN_VALUE); + + assertTrue(AggregationInspectionHelper.hasValue(nested)); } } } @@ -263,7 +266,7 @@ public void testOrphanedDocs() throws IOException { NumberFieldMapper.NumberType.LONG); fieldType.setName(VALUE_FIELD_NAME); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), nestedBuilder, fieldType); assertEquals(expectedNestedDocs, nested.getDocCount()); @@ -348,13 +351,15 @@ public void testResetRootDocId() throws Exception { bq.add(Queries.newNonNestedFilter(VersionUtils.randomVersion(random())), BooleanClause.Occur.MUST); bq.add(new TermQuery(new Term(IdFieldMapper.NAME, Uid.encodeId("2"))), BooleanClause.Occur.MUST_NOT); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new ConstantScoreQuery(bq.build()), nestedBuilder, fieldType); assertEquals(NESTED_AGG, nested.getName()); // The bug manifests if 6 docs are returned, because currentRootDoc isn't reset the previous child docs from the first // segment are emitted as hits. assertEquals(4L, nested.getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(nested)); } } } @@ -630,7 +635,7 @@ public void testPreGetChildLeafCollectors() throws IOException { assertEquals("filterAgg", filter.getName()); assertEquals(3L, filter.getDocCount()); - Nested nested = filter.getAggregations().get(NESTED_AGG); + InternalNested nested = filter.getAggregations().get(NESTED_AGG); assertEquals(6L, nested.getDocCount()); StringTerms keyAgg = nested.getAggregations().get("key"); @@ -687,7 +692,7 @@ public void testFieldAlias() throws IOException { NestedAggregationBuilder aliasAgg = nested(NESTED_AGG, NESTED_OBJECT).subAggregation( max(MAX_AGG_NAME).field(VALUE_FIELD_NAME + "-alias")); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), agg, fieldType); Nested aliasNested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), aliasAgg, fieldType); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java index e446dfb3d2b9a..52988764d3bc3 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.metrics.Min; import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; @@ -75,10 +76,11 @@ public void testSampler() throws IOException { try (IndexReader reader = DirectoryReader.open(w)) { assertEquals("test expects a single segment", 1, reader.leaves().size()); IndexSearcher searcher = new IndexSearcher(reader); - Sampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "good")), aggBuilder, textFieldType, + InternalSampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "good")), aggBuilder, textFieldType, numericFieldType); Min min = sampler.getAggregations().get("min"); assertEquals(5.0, min.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(sampler)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java index dbff6daed6285..123bb0f60427c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java @@ -37,8 +37,9 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.bucket.sampler.Sampler; +import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; @@ -88,7 +89,7 @@ public void testSignificance() throws IOException { IndexSearcher searcher = new IndexSearcher(reader); // Search "odd" which should have no duplication - Sampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), aggBuilder, textFieldType); + InternalSampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), aggBuilder, textFieldType); SignificantTerms terms = sampler.getAggregations().get("sig_text"); assertNull(terms.getBucketByKey("even")); @@ -109,6 +110,7 @@ public void testSignificance() throws IOException { assertNotNull(terms.getBucketByKey("even")); + assertTrue(AggregationInspectionHelper.hasValue(sampler)); } } } @@ -142,8 +144,9 @@ public void testFieldAlias() throws IOException { SamplerAggregationBuilder samplerAgg = sampler("sampler").subAggregation(agg); SamplerAggregationBuilder aliasSamplerAgg = sampler("sampler").subAggregation(aliasAgg); - Sampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), samplerAgg, textFieldType); - Sampler aliasSampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), aliasSamplerAgg, textFieldType); + InternalSampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), samplerAgg, textFieldType); + InternalSampler aliasSampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), + aliasSamplerAgg, textFieldType); SignificantTerms terms = sampler.getAggregations().get("sig_text"); SignificantTerms aliasTerms = aliasSampler.getAggregations().get("sig_text"); @@ -157,6 +160,9 @@ public void testFieldAlias() throws IOException { aliasTerms = aliasSampler.getAggregations().get("sig_text"); assertFalse(terms.getBuckets().isEmpty()); assertEquals(terms, aliasTerms); + + assertTrue(AggregationInspectionHelper.hasValue(sampler)); + assertTrue(AggregationInspectionHelper.hasValue(aliasSampler)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index d8948c1061369..8acf78d301af5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -73,6 +73,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; @@ -199,6 +200,7 @@ public void testSimple() throws Exception { assertEquals(1L, result.getBuckets().get(3).getDocCount()); assertEquals("d", result.getBuckets().get(4).getKeyAsString()); assertEquals(1L, result.getBuckets().get(4).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); } } } @@ -278,6 +280,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(8).getDocCount()); assertEquals("val009", result.getBuckets().get(9).getKeyAsString()); assertEquals(1L, result.getBuckets().get(9).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); MappedFieldType fieldType2 = new KeywordFieldMapper.KeywordFieldType(); fieldType2.setName("sv_field"); @@ -304,6 +307,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(3).getDocCount()); assertEquals("val009", result.getBuckets().get(4).getKeyAsString()); assertEquals(1L, result.getBuckets().get(4).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) .executionHint(executionHint) @@ -333,6 +337,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(6).getDocCount()); assertEquals("val009", result.getBuckets().get(7).getKeyAsString()); assertEquals(1L, result.getBuckets().get(7).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) .executionHint(executionHint) @@ -349,6 +354,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("val011", result.getBuckets().get(1).getKeyAsString()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) .executionHint(executionHint) @@ -365,6 +371,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("val010", result.getBuckets().get(1).getKeyAsString()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) .executionHint(executionHint) @@ -382,6 +389,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("val010", result.getBuckets().get(1).getKeyAsString()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); } } } @@ -436,6 +444,7 @@ public void testNumericIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals(5L, result.getBuckets().get(1).getKey()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.LONG) .executionHint(executionHint) @@ -456,6 +465,7 @@ public void testNumericIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(2).getDocCount()); assertEquals(4L, result.getBuckets().get(3).getKey()); assertEquals(1L, result.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); fieldType.setName("double_field"); @@ -475,6 +485,7 @@ public void testNumericIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals(5.0, result.getBuckets().get(1).getKey()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.DOUBLE) .executionHint(executionHint) @@ -495,6 +506,7 @@ public void testNumericIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(2).getDocCount()); assertEquals(4.0, result.getBuckets().get(3).getKey()); assertEquals(1L, result.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java index b83acfcba80ec..3e86571ae45e9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java @@ -35,9 +35,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.AvgAggregator; -import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; @@ -52,6 +50,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -61,6 +60,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 3))); }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -71,6 +71,7 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("number", 3))); }, avg -> { assertEquals(4, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -81,6 +82,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { iw.addDocument(singleton(new NumericDocValuesField("number", 3))); }, avg -> { assertEquals(4, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -91,6 +93,7 @@ public void testQueryFiltering() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("number", 3), new SortedNumericDocValuesField("number", 3))); }, avg -> { assertEquals(2.5, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -101,6 +104,7 @@ public void testQueryFiltersAll() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("number", 3), new SortedNumericDocValuesField("number", 7))); }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java index a2789a9ef1648..4f5a8bb1ea484 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; @@ -48,6 +49,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, card -> { assertEquals(0.0, card.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(card)); }); } @@ -57,6 +59,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); }, card -> { assertEquals(0.0, card.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(card)); }); } @@ -66,6 +69,7 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("number", 1))); }, card -> { assertEquals(2, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); }); } @@ -75,6 +79,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { iw.addDocument(singleton(new NumericDocValuesField("number", 1))); }, card -> { assertEquals(2, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); }); } @@ -86,6 +91,7 @@ public void testQueryFiltering() throws IOException { new SortedNumericDocValuesField("number", 1))); }, card -> { assertEquals(1, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); }); } @@ -97,6 +103,7 @@ public void testQueryFiltersAll() throws IOException { new SortedNumericDocValuesField("number", 1))); }, card -> { assertEquals(0.0, card.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(card)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java index e65d1269520bc..ca26ba1b20672 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.function.Consumer; @@ -55,6 +56,7 @@ public void testEmpty() throws IOException { assertEquals(Double.NaN, stats.getVariance(), 0); assertEquals(Double.NaN, stats.getStdDeviation(), 0); assertEquals(0d, stats.getSumOfSquares(), 0); + assertFalse(AggregationInspectionHelper.hasValue(stats)); } ); } @@ -92,6 +94,7 @@ public void testRandomDoubles() throws IOException { stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER), TOLERANCE); assertEquals(expected.stdDevBound(ExtendedStats.Bounds.UPPER, stats.getSigma()), stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER), TOLERANCE); + assertTrue(AggregationInspectionHelper.hasValue(stats)); } ); } @@ -128,6 +131,7 @@ public void testRandomLongs() throws IOException { stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER), TOLERANCE); assertEquals(expected.stdDevBound(ExtendedStats.Bounds.UPPER, stats.getSigma()), stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER), TOLERANCE); + assertTrue(AggregationInspectionHelper.hasValue(stats)); } ); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java index b171e7436eee4..562d29416dcd8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.test.geo.RandomGeoGenerator; import static org.elasticsearch.search.aggregations.metrics.InternalGeoBoundsTests.GEOHASH_TOLERANCE; @@ -55,6 +56,7 @@ public void testEmpty() throws Exception { assertTrue(Double.isInfinite(bounds.posRight)); assertTrue(Double.isInfinite(bounds.negLeft)); assertTrue(Double.isInfinite(bounds.negRight)); + assertFalse(AggregationInspectionHelper.hasValue(bounds)); } } } @@ -112,6 +114,7 @@ public void testRandom() throws Exception { assertThat(bounds.posRight, closeTo(posRight, GEOHASH_TOLERANCE)); assertThat(bounds.negRight, closeTo(negRight, GEOHASH_TOLERANCE)); assertThat(bounds.negLeft, closeTo(negLeft, GEOHASH_TOLERANCE)); + assertTrue(AggregationInspectionHelper.hasValue(bounds)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java index 3865070741258..303ed65f44856 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalGeoCentroid; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.test.geo.RandomGeoGenerator; import java.io.IOException; @@ -52,6 +53,7 @@ public void testEmpty() throws Exception { IndexSearcher searcher = new IndexSearcher(reader); InternalGeoCentroid result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertNull(result.centroid()); + assertFalse(AggregationInspectionHelper.hasValue(result)); } } } @@ -79,6 +81,7 @@ public void testUnmapped() throws Exception { fieldType.setName("field"); result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertNull(result.centroid()); + assertFalse(AggregationInspectionHelper.hasValue(result)); } } } @@ -149,6 +152,7 @@ private void assertCentroid(RandomIndexWriter w, GeoPoint expectedCentroid) thro assertNotNull(centroid); assertEquals(expectedCentroid.getLat(), centroid.getLat(), GEOHASH_TOLERANCE); assertEquals(expectedCentroid.getLon(), centroid.getLon(), GEOHASH_TOLERANCE); + assertTrue(AggregationInspectionHelper.hasValue(result)); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java index 52bd6a37e6f6f..9d9c74f283b45 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.aggregations.metrics.PercentileRanks; import org.elasticsearch.search.aggregations.metrics.PercentileRanksAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.hamcrest.Matchers; import java.io.IOException; @@ -55,6 +56,7 @@ public void testEmpty() throws IOException { Percentile rank = ranks.iterator().next(); assertEquals(Double.NaN, rank.getPercent(), 0d); assertEquals(0.5, rank.getValue(), 0d); + assertFalse(AggregationInspectionHelper.hasValue((InternalHDRPercentileRanks)ranks)); } } @@ -87,6 +89,7 @@ public void testSimple() throws IOException { assertEquals(12, rank.getValue(), 0d); assertThat(rank.getPercent(), Matchers.equalTo(100d)); assertFalse(rankIterator.hasNext()); + assertTrue(AggregationInspectionHelper.hasValue((InternalHDRPercentileRanks)ranks)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java index b68b68dd544ea..f08a89657c63b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; import org.elasticsearch.search.aggregations.metrics.PercentilesAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.function.Consumer; @@ -52,6 +53,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, hdr -> { assertEquals(0L, hdr.state.getTotalCount()); + assertFalse(AggregationInspectionHelper.hasValue(hdr)); }); } @@ -61,6 +63,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); }, hdr -> { assertEquals(0L, hdr.state.getTotalCount()); + assertFalse(AggregationInspectionHelper.hasValue(hdr)); }); } @@ -77,6 +80,7 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { assertEquals(20.0d, hdr.percentile(50), approximation); assertEquals(40.0d, hdr.percentile(75), approximation); assertEquals(60.0d, hdr.percentile(99), approximation); + assertTrue(AggregationInspectionHelper.hasValue(hdr)); }); } @@ -93,6 +97,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { assertEquals(20.0d, hdr.percentile(50), approximation); assertEquals(40.0d, hdr.percentile(75), approximation); assertEquals(60.0d, hdr.percentile(99), approximation); + assertTrue(AggregationInspectionHelper.hasValue(hdr)); }); } @@ -107,10 +112,12 @@ public void testQueryFiltering() throws IOException { testCase(LongPoint.newRangeQuery("row", 0, 2), docs, hdr -> { assertEquals(2L, hdr.state.getTotalCount()); assertEquals(10.0d, hdr.percentile(randomDoubleBetween(1, 50, true)), 0.05d); + assertTrue(AggregationInspectionHelper.hasValue(hdr)); }); testCase(LongPoint.newRangeQuery("row", 5, 10), docs, hdr -> { assertEquals(0L, hdr.state.getTotalCount()); + assertFalse(AggregationInspectionHelper.hasValue(hdr)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java index 013f951cc3f2c..d962178661272 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java @@ -49,6 +49,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.ArrayList; @@ -69,6 +70,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, max -> { assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(max)); }); } @@ -78,6 +80,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); }, max -> { assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(max)); }); } @@ -87,6 +90,7 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("number", 1))); }, max -> { assertEquals(7, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); }); } @@ -96,6 +100,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { iw.addDocument(singleton(new NumericDocValuesField("number", 1))); }, max -> { assertEquals(7, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); }); } @@ -105,6 +110,7 @@ public void testQueryFiltering() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("number", 1), new SortedNumericDocValuesField("number", 1))); }, max -> { assertEquals(1, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); }); } @@ -114,6 +120,7 @@ public void testQueryFiltersAll() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("number", 1), new SortedNumericDocValuesField("number", 1))); }, max -> { assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(max)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java index d47d9006b7f38..55cf9b16e1688 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.hamcrest.Description; import org.hamcrest.TypeSafeMatcher; @@ -71,7 +72,10 @@ private static CheckedConsumer {}, agg -> assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN))); + testCase(new MatchAllDocsQuery(), writer -> {}, agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); + assertFalse(AggregationInspectionHelper.hasValue(agg)); + }); } public void testNoMatchingField() throws IOException { @@ -81,7 +85,10 @@ public void testNoMatchingField() throws IOException { writer.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); writer.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 2))); }, - agg -> assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)) + agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); + assertFalse(AggregationInspectionHelper.hasValue(agg)); + } ); } @@ -94,7 +101,10 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { sample.add(point); return singleton(new SortedNumericDocValuesField("number", point)); }), - agg -> assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(sample))) + agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(sample))); + assertTrue(AggregationInspectionHelper.hasValue(agg)); + } ); } @@ -107,7 +117,10 @@ public void testSomeMatchesNumericDocValues() throws IOException { sample.add(point); return singleton(new NumericDocValuesField("number", point)); }), - agg -> assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(sample))) + agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(sample))); + assertTrue(AggregationInspectionHelper.hasValue(agg)); + } ); } @@ -123,7 +136,10 @@ public void testQueryFiltering() throws IOException { writer.addDocument(Arrays.asList(new IntPoint("number", point), new SortedNumericDocValuesField("number", point))); } }, - agg -> assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(filteredSample))) + agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(filteredSample))); + assertTrue(AggregationInspectionHelper.hasValue(agg)); + } ); } @@ -134,7 +150,10 @@ public void testQueryFiltersAll() throws IOException { writer.addDocument(Arrays.asList(new IntPoint("number", 1), new SortedNumericDocValuesField("number", 1))); writer.addDocument(Arrays.asList(new IntPoint("number", 2), new SortedNumericDocValuesField("number", 2))); }, - agg -> assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)) + agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); + assertFalse(AggregationInspectionHelper.hasValue(agg)); + } ); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java index ad897a2ef3264..cfe3c86034f85 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java @@ -50,6 +50,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.FieldContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -116,6 +117,7 @@ public void testMinAggregator_numericDv() throws Exception { aggregator.postCollection(); InternalMin result = (InternalMin) aggregator.buildAggregation(0L); assertEquals(-1.0, result.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(result)); indexReader.close(); directory.close(); @@ -157,6 +159,7 @@ public void testMinAggregator_sortedNumericDv() throws Exception { aggregator.postCollection(); InternalMin result = (InternalMin) aggregator.buildAggregation(0L); assertEquals(-1.0, result.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(result)); indexReader.close(); directory.close(); @@ -189,6 +192,7 @@ public void testMinAggregator_noValue() throws Exception { aggregator.postCollection(); InternalMin result = (InternalMin) aggregator.buildAggregation(0L); assertEquals(Double.POSITIVE_INFINITY, result.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(result)); indexReader.close(); directory.close(); @@ -212,6 +216,7 @@ public void testMinAggregator_noDocs() throws Exception { aggregator.postCollection(); InternalMin result = (InternalMin) aggregator.buildAggregation(0L); assertEquals(Double.POSITIVE_INFINITY, result.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(result)); indexReader.close(); directory.close(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java index 52a45f9c017d1..28b1514545506 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.function.Consumer; @@ -51,6 +52,7 @@ public void testEmpty() throws IOException { assertEquals(Float.NaN, stats.getAvg(), 0); assertEquals(Double.POSITIVE_INFINITY, stats.getMin(), 0); assertEquals(Double.NEGATIVE_INFINITY, stats.getMax(), 0); + assertFalse(AggregationInspectionHelper.hasValue(stats)); } ); } @@ -81,6 +83,7 @@ public void testRandomDoubles() throws IOException { assertEquals(expected.min, stats.getMin(), 0); assertEquals(expected.max, stats.getMax(), 0); assertEquals(expected.sum / expected.count, stats.getAvg(), TOLERANCE); + assertTrue(AggregationInspectionHelper.hasValue(stats)); } ); } @@ -110,6 +113,7 @@ public void testRandomLongs() throws IOException { assertEquals(expected.min, stats.getMin(), 0); assertEquals(expected.max, stats.getMax(), 0); assertEquals(expected.sum / expected.count, stats.getAvg(), TOLERANCE); + assertTrue(AggregationInspectionHelper.hasValue(stats)); } ); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java index eb57bc9a5115c..ff76aa4d0edef 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; @@ -53,14 +54,20 @@ public class SumAggregatorTests extends AggregatorTestCase { public void testNoDocs() throws IOException { testCase(new MatchAllDocsQuery(), iw -> { // Intentionally not writing any docs - }, count -> assertEquals(0L, count.getValue(), 0d)); + }, count -> { + assertEquals(0L, count.getValue(), 0d); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); } public void testNoMatchingField() throws IOException { testCase(new MatchAllDocsQuery(), iw -> { iw.addDocument(singleton(new NumericDocValuesField("wrong_number", 7))); iw.addDocument(singleton(new NumericDocValuesField("wrong_number", 1))); - }, count -> assertEquals(0L, count.getValue(), 0d)); + }, count -> { + assertEquals(0L, count.getValue(), 0d); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); } public void testNumericDocValues() throws IOException { @@ -81,7 +88,10 @@ public void testNumericDocValues() throws IOException { iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 2))); iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 1))); iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 2))); - }, count -> assertEquals(24L, count.getValue(), 0d)); + }, count -> { + assertEquals(24L, count.getValue(), 0d); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testSortedNumericDocValues() throws IOException { @@ -91,7 +101,10 @@ public void testSortedNumericDocValues() throws IOException { iw.addDocument(Arrays.asList(new SortedNumericDocValuesField(FIELD_NAME, 3), new SortedNumericDocValuesField(FIELD_NAME, 4))); iw.addDocument(singleton(new SortedNumericDocValuesField(FIELD_NAME, 1))); - }, count -> assertEquals(15L, count.getValue(), 0d)); + }, count -> { + assertEquals(15L, count.getValue(), 0d); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testQueryFiltering() throws IOException { @@ -101,14 +114,20 @@ public void testQueryFiltering() throws IOException { iw.addDocument(Arrays.asList(new StringField("match", "yes", Field.Store.NO), new NumericDocValuesField(FIELD_NAME, 3))); iw.addDocument(Arrays.asList(new StringField("match", "no", Field.Store.NO), new NumericDocValuesField(FIELD_NAME, 4))); iw.addDocument(Arrays.asList(new StringField("match", "yes", Field.Store.NO), new NumericDocValuesField(FIELD_NAME, 5))); - }, count -> assertEquals(9L, count.getValue(), 0d)); + }, count -> { + assertEquals(9L, count.getValue(), 0d); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testStringField() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> { testCase(new MatchAllDocsQuery(), iw -> { iw.addDocument(singleton(new SortedDocValuesField(FIELD_NAME, new BytesRef("1")))); - }, count -> assertEquals(0L, count.getValue(), 0d)); + }, count -> { + assertEquals(0L, count.getValue(), 0d); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); }); assertEquals("unexpected docvalues type SORTED for field 'field' (expected one of [SORTED_NUMERIC, NUMERIC]). " + "Re-index with correct docvalues type.", e.getMessage()); @@ -159,13 +178,13 @@ private void verifySummationOfDoubles(double[] values, double expected, double d private void testCase(Query query, CheckedConsumer indexer, - Consumer verify) throws IOException { + Consumer verify) throws IOException { testCase(query, indexer, verify, NumberFieldMapper.NumberType.LONG); } private void testCase(Query query, CheckedConsumer indexer, - Consumer verify, + Consumer verify, NumberFieldMapper.NumberType fieldNumberType) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { @@ -187,7 +206,7 @@ private void testCase(Query query, indexSearcher.search(query, aggregator); aggregator.postCollection(); - verify.accept((Sum) aggregator.buildAggregation(0L)); + verify.accept((InternalSum) aggregator.buildAggregation(0L)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java index 363ba14198390..2541583e94580 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.aggregations.metrics.PercentileRanks; import org.elasticsearch.search.aggregations.metrics.PercentileRanksAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.hamcrest.Matchers; import java.io.IOException; @@ -55,6 +56,7 @@ public void testEmpty() throws IOException { Percentile rank = ranks.iterator().next(); assertEquals(Double.NaN, rank.getPercent(), 0d); assertEquals(0.5, rank.getValue(), 0d); + assertFalse(AggregationInspectionHelper.hasValue(((InternalTDigestPercentileRanks)ranks))); } } @@ -91,6 +93,7 @@ public void testSimple() throws IOException { // https://github.com/elastic/elasticsearch/issues/14851 // assertThat(rank.getPercent(), Matchers.equalTo(100d)); assertFalse(rankIterator.hasNext()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTDigestPercentileRanks)ranks))); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java index 8a4f399cb2525..0b1692fa6133c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.metrics.PercentilesAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; import org.elasticsearch.search.aggregations.metrics.TDigestPercentilesAggregator; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.function.Consumer; @@ -52,6 +53,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, tdigest -> { assertEquals(0L, tdigest.state.size()); + assertFalse(AggregationInspectionHelper.hasValue(tdigest)); }); } @@ -61,6 +63,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); }, tdigest -> { assertEquals(0L, tdigest.state.size()); + assertFalse(AggregationInspectionHelper.hasValue(tdigest)); }); } @@ -82,6 +85,7 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { assertEquals("2.0", tdigest.percentileAsString(50)); assertEquals(1.0d, tdigest.percentile(22), 0.0d); assertEquals("1.0", tdigest.percentileAsString(22)); + assertTrue(AggregationInspectionHelper.hasValue(tdigest)); }); } @@ -107,6 +111,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { assertEquals("1.0", tdigest.percentileAsString(25)); assertEquals(0.0d, tdigest.percentile(1), 0.0d); assertEquals("0.0", tdigest.percentileAsString(1)); + assertTrue(AggregationInspectionHelper.hasValue(tdigest)); }); } @@ -127,11 +132,13 @@ public void testQueryFiltering() throws IOException { assertEquals(2.0d, tdigest.percentile(100), 0.0d); assertEquals(1.0d, tdigest.percentile(50), 0.0d); assertEquals(0.5d, tdigest.percentile(25), 0.0d); + assertTrue(AggregationInspectionHelper.hasValue(tdigest)); }); testCase(LongPoint.newRangeQuery("row", 100, 110), docs, tdigest -> { assertEquals(0L, tdigest.state.size()); assertEquals(0L, tdigest.state.centroidCount()); + assertFalse(AggregationInspectionHelper.hasValue(tdigest)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java index 921f29c915f51..585cd7f9ff434 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java @@ -50,6 +50,7 @@ import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.TopHits; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.sort.SortOrder; import java.io.IOException; @@ -74,12 +75,14 @@ public void testTopLevel() throws Exception { assertEquals("type", searchHits.getAt(1).getType()); assertEquals("1", searchHits.getAt(2).getId()); assertEquals("type", searchHits.getAt(2).getType()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits)result))); } public void testNoResults() throws Exception { TopHits result = (TopHits) testCase(new MatchNoDocsQuery(), topHits("_name").sort("string", SortOrder.DESC)); SearchHits searchHits = ((TopHits) result).getHits(); assertEquals(0L, searchHits.getTotalHits().value); + assertFalse(AggregationInspectionHelper.hasValue(((InternalTopHits)result))); } /** @@ -106,22 +109,26 @@ public void testInsideTerms() throws Exception { assertEquals(2L, searchHits.getTotalHits().value); assertEquals("2", searchHits.getAt(0).getId()); assertEquals("1", searchHits.getAt(1).getId()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits) terms.getBucketByKey("a").getAggregations().get("top")))); // The "b" bucket searchHits = ((TopHits) terms.getBucketByKey("b").getAggregations().get("top")).getHits(); assertEquals(2L, searchHits.getTotalHits().value); assertEquals("3", searchHits.getAt(0).getId()); assertEquals("1", searchHits.getAt(1).getId()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits) terms.getBucketByKey("b").getAggregations().get("top")))); // The "c" bucket searchHits = ((TopHits) terms.getBucketByKey("c").getAggregations().get("top")).getHits(); assertEquals(1L, searchHits.getTotalHits().value); assertEquals("2", searchHits.getAt(0).getId()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits) terms.getBucketByKey("c").getAggregations().get("top")))); // The "d" bucket searchHits = ((TopHits) terms.getBucketByKey("d").getAggregations().get("top")).getHits(); assertEquals(1L, searchHits.getTotalHits().value); assertEquals("3", searchHits.getAt(0).getId()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits) terms.getBucketByKey("d").getAggregations().get("top")))); } private static final MappedFieldType STRING_FIELD_TYPE = new KeywordFieldMapper.KeywordFieldType(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index ce8d9c2da834f..4a5982b9dacfa 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.MockScriptPlugin; @@ -83,6 +84,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -578,6 +580,7 @@ public void testFieldCollapsing() throws Exception { } public void testFetchFeatures() { + final boolean seqNoAndTerm = randomBoolean(); SearchResponse response = client().prepareSearch("idx") .setQuery(matchQuery("text", "text").queryName("test")) .addAggregation(terms("terms") @@ -593,6 +596,7 @@ public void testFetchFeatures() { new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) .fetchSource("text", null) .version(true) + .seqNoAndPrimaryTerm(seqNoAndTerm) ) ) .get(); @@ -620,6 +624,14 @@ public void testFetchFeatures() { long version = hit.getVersion(); assertThat(version, equalTo(1L)); + if (seqNoAndTerm) { + assertThat(hit.getSeqNo(), greaterThanOrEqualTo(0L)); + assertThat(hit.getPrimaryTerm(), greaterThanOrEqualTo(1L)); + } else { + assertThat(hit.getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); + assertThat(hit.getPrimaryTerm(), equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM)); + } + assertThat(hit.getMatchedQueries()[0], equalTo("test")); DocumentField field = hit.field("field1"); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java index caf1d4ef4ee47..6ec1ea1cad301 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java @@ -54,6 +54,9 @@ protected final TopHitsAggregationBuilder createTestAggregatorBuilder() { if (randomBoolean()) { factory.version(randomBoolean()); } + if (randomBoolean()) { + factory.seqNoAndPrimaryTerm(randomBoolean()); + } if (randomBoolean()) { factory.trackScores(randomBoolean()); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java index f9118e30a6efd..ea14d9ec671b2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java @@ -35,15 +35,13 @@ import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.mapper.BooleanFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.IpFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; -import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.ValueCount; -import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ValueCountAggregator; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; @@ -60,7 +58,10 @@ public void testNoDocs() throws IOException { for (ValueType valueType : ValueType.values()) { testCase(new MatchAllDocsQuery(), valueType, iw -> { // Intentionally not writing any docs - }, count -> assertEquals(0L, count.getValue())); + }, count -> { + assertEquals(0L, count.getValue()); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); } } @@ -68,7 +69,10 @@ public void testNoMatchingField() throws IOException { testCase(new MatchAllDocsQuery(), ValueType.LONG, iw -> { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 7))); iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); - }, count -> assertEquals(0L, count.getValue())); + }, count -> { + assertEquals(0L, count.getValue()); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); } public void testSomeMatchesSortedNumericDocValues() throws IOException { @@ -76,14 +80,20 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 7))); iw.addDocument(singleton(new SortedNumericDocValuesField(FIELD_NAME, 7))); iw.addDocument(singleton(new SortedNumericDocValuesField(FIELD_NAME, 1))); - }, count -> assertEquals(2L, count.getValue())); + }, count -> { + assertEquals(2L, count.getValue()); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testSomeMatchesNumericDocValues() throws IOException { testCase(new DocValuesFieldExistsQuery(FIELD_NAME), ValueType.NUMBER, iw -> { iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 7))); iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 1))); - }, count -> assertEquals(2L, count.getValue())); + }, count -> { + assertEquals(2L, count.getValue()); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testQueryFiltering() throws IOException { @@ -93,20 +103,26 @@ public void testQueryFiltering() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("level", 3), new SortedDocValuesField(FIELD_NAME, new BytesRef("foo")))); iw.addDocument(Arrays.asList(new IntPoint("level", 5), new SortedDocValuesField(FIELD_NAME, new BytesRef("baz")))); iw.addDocument(Arrays.asList(new IntPoint("level", 7), new SortedDocValuesField(FIELD_NAME, new BytesRef("baz")))); - }, count -> assertEquals(4L, count.getValue())); + }, count -> { + assertEquals(4L, count.getValue()); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testQueryFiltersAll() throws IOException { testCase(IntPoint.newRangeQuery("level", -1, 0), ValueType.STRING, iw -> { iw.addDocument(Arrays.asList(new IntPoint("level", 3), new SortedDocValuesField(FIELD_NAME, new BytesRef("foo")))); iw.addDocument(Arrays.asList(new IntPoint("level", 5), new SortedDocValuesField(FIELD_NAME, new BytesRef("baz")))); - }, count -> assertEquals(0L, count.getValue())); + }, count -> { + assertEquals(0L, count.getValue()); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); } private void testCase(Query query, ValueType valueType, CheckedConsumer indexer, - Consumer verify) throws IOException { + Consumer verify) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { @@ -127,7 +143,7 @@ private void testCase(Query query, aggregator.preCollection(); indexSearcher.search(query, aggregator); aggregator.postCollection(); - verify.accept((ValueCount) aggregator.buildAggregation(0L)); + verify.accept((InternalValueCount) aggregator.buildAggregation(0L)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java index 3836f0cc2ae14..78c4d18218eea 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java @@ -39,10 +39,11 @@ import org.elasticsearch.search.aggregations.metrics.InternalWeightedAvg; import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregator; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneOffset; import java.util.Arrays; import java.util.Collections; import java.util.function.Consumer; @@ -63,6 +64,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -77,6 +79,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 3))); }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -95,6 +98,7 @@ public void testSomeMatchesSortedNumericDocValuesNoWeight() throws IOException { new SortedNumericDocValuesField("weight_field", 1))); }, avg -> { assertEquals(4, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -115,6 +119,7 @@ public void testSomeMatchesSortedNumericDocValuesWeights() throws IOException { }, avg -> { // (7*2 + 2*3 + 3*3) / (2+3+3) == 3.625 assertEquals(3.625, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -133,6 +138,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { new SortedNumericDocValuesField("weight_field", 1))); }, avg -> { assertEquals(4, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -151,6 +157,7 @@ public void testQueryFiltering() throws IOException { new SortedNumericDocValuesField("weight_field", 1))); }, avg -> { assertEquals(2.5, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -170,6 +177,7 @@ public void testQueryFilteringWeights() throws IOException { }, avg -> { double value = (2.0*3.0 + 3.0*4.0) / (3.0+4.0); assertEquals(value, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -185,6 +193,7 @@ public void testQueryFiltersAll() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("value_field", 3), new SortedNumericDocValuesField("value_field", 7))); }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -203,6 +212,7 @@ public void testQueryFiltersAllWeights() throws IOException { new SortedNumericDocValuesField("weight_field", 4))); }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -222,6 +232,7 @@ public void testValueSetMissing() throws IOException { }, avg -> { double value = (2.0*2.0 + 2.0*3.0 + 2.0*4.0) / (2.0+3.0+4.0); assertEquals(value, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -241,6 +252,7 @@ public void testWeightSetMissing() throws IOException { }, avg -> { double value = (2.0*2.0 + 3.0*2.0 + 4.0*2.0) / (2.0+2.0+2.0); assertEquals(value, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -248,7 +260,7 @@ public void testWeightSetTimezone() throws IOException { MultiValuesSourceFieldConfig valueConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("value_field").build(); MultiValuesSourceFieldConfig weightConfig = new MultiValuesSourceFieldConfig.Builder() .setFieldName("weight_field") - .setTimeZone(DateTimeZone.UTC) + .setTimeZone(ZoneOffset.UTC) .build(); WeightedAvgAggregationBuilder aggregationBuilder = new WeightedAvgAggregationBuilder("_name") .value(valueConfig) @@ -271,7 +283,7 @@ public void testWeightSetTimezone() throws IOException { public void testValueSetTimezone() throws IOException { MultiValuesSourceFieldConfig valueConfig = new MultiValuesSourceFieldConfig.Builder() .setFieldName("value_field") - .setTimeZone(DateTimeZone.UTC) + .setTimeZone(ZoneOffset.UTC) .build(); MultiValuesSourceFieldConfig weightConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("weight_field").build(); WeightedAvgAggregationBuilder aggregationBuilder = new WeightedAvgAggregationBuilder("_name") @@ -311,6 +323,7 @@ public void testMultiValues() throws IOException { }, avg -> { double value = (((2.0+3.0)/2.0) + ((3.0+4.0)/2.0) + ((4.0+5.0)/2.0)) / (1.0+1.0+1.0); assertEquals(value, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java index 59af941812175..3ed1a15603e84 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; @@ -40,9 +41,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalAvg; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregator; import java.io.IOException; import java.util.ArrayList; @@ -141,8 +139,7 @@ public void testSameAggNames() throws IOException { } } - private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis(); + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java index 7cb4371354c3b..22a4fdbdf67bf 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java @@ -31,9 +31,9 @@ import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; -import org.joda.time.DateTime; import java.io.IOException; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -132,10 +132,10 @@ public void testEmptyBucketSort() { assertThat(histogram, notNullValue()); // These become our baseline List timeBuckets = histogram.getBuckets(); - DateTime previousKey = (DateTime) timeBuckets.get(0).getKey(); + ZonedDateTime previousKey = (ZonedDateTime) timeBuckets.get(0).getKey(); for (Histogram.Bucket timeBucket : timeBuckets) { - assertThat(previousKey, lessThanOrEqualTo((DateTime) timeBucket.getKey())); - previousKey = (DateTime) timeBucket.getKey(); + assertThat(previousKey, lessThanOrEqualTo((ZonedDateTime) timeBucket.getKey())); + previousKey = (ZonedDateTime) timeBucket.getKey(); } // Now let's test using size diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java index 37882bcdf5e44..4b23304e642c0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; @@ -47,6 +48,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalAvg; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; @@ -93,6 +95,7 @@ public void testSimple() throws IOException { for (Histogram.Bucket bucket : buckets) { sum += ((InternalAvg) (bucket.getAggregations().get("the_avg"))).value(); assertThat(((InternalSimpleValue) (bucket.getAggregations().get("cusum"))).value(), equalTo(sum)); + assertTrue(AggregationInspectionHelper.hasValue(((InternalAvg) (bucket.getAggregations().get("the_avg"))))); } }); } @@ -116,14 +119,37 @@ public void testDerivative() throws IOException { for (int i = 0; i < buckets.size(); i++) { if (i == 0) { assertThat(((InternalSimpleValue)(buckets.get(i).getAggregations().get("cusum"))).value(), equalTo(0.0)); + assertTrue(AggregationInspectionHelper.hasValue(((InternalSimpleValue) (buckets.get(i) + .getAggregations().get("cusum"))))); } else { sum += 1.0; assertThat(((InternalSimpleValue)(buckets.get(i).getAggregations().get("cusum"))).value(), equalTo(sum)); + assertTrue(AggregationInspectionHelper.hasValue(((InternalSimpleValue) (buckets.get(i) + .getAggregations().get("cusum"))))); } } }); } + public void testCount() throws IOException { + Query query = new MatchAllDocsQuery(); + + DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo"); + aggBuilder.dateHistogramInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); + aggBuilder.subAggregation(new CumulativeSumPipelineAggregationBuilder("cusum", "_count")); + + executeTestCase(query, aggBuilder, histogram -> { + assertEquals(10, ((Histogram)histogram).getBuckets().size()); + List buckets = ((Histogram)histogram).getBuckets(); + double sum = 1.0; + for (Histogram.Bucket bucket : buckets) { + assertThat(((InternalSimpleValue) (bucket.getAggregations().get("cusum"))).value(), equalTo(sum)); + assertTrue(AggregationInspectionHelper.hasValue(((InternalSimpleValue) (bucket.getAggregations().get("cusum"))))); + sum += 1.0; + } + }); + } + public void testDocCount() throws IOException { Query query = new MatchAllDocsQuery(); @@ -340,6 +366,6 @@ private void executeTestCase(Query query, AggregationBuilder aggBuilder, Consume } private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis(); + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java index 62fe7a2a45a60..db1ee6ab18916 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -21,7 +21,8 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -31,12 +32,14 @@ import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matcher; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; import org.junit.After; import java.io.IOException; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -60,19 +63,11 @@ public class DateDerivativeIT extends ESIntegTestCase { private static final String IDX_DST_END = "idx_dst_end"; private static final String IDX_DST_KATHMANDU = "idx_dst_kathmandu"; - private DateTime date(int month, int day) { - return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC); + private ZonedDateTime date(int month, int day) { + return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC); } - private DateTime date(String date) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date); - } - - private static String format(DateTime date, String pattern) { - return DateTimeFormat.forPattern(pattern).print(date); - } - - private static IndexRequestBuilder indexDoc(String idx, DateTime date, int value) throws Exception { + private static IndexRequestBuilder indexDoc(String idx, ZonedDateTime date, int value) throws Exception { return client().prepareIndex(idx, "type").setSource( jsonBuilder().startObject().timeField("date", date).field("value", value).endObject()); } @@ -124,27 +119,27 @@ public void testSingleValuedField() throws Exception { List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, nullValue()); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(1d)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); @@ -166,28 +161,28 @@ public void testSingleValuedFieldNormalised() throws Exception { List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); Derivative docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, nullValue()); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), closeTo(1d, 0.00001)); assertThat(docCountDeriv.normalizedValue(), closeTo(1d / 31d, 0.00001)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); @@ -202,11 +197,14 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstStart() throws Excep createIndex(IDX_DST_START); List builders = new ArrayList<>(); - DateTimeZone timezone = DateTimeZone.forID("CET"); - addNTimes(1, IDX_DST_START, new DateTime("2012-03-24T01:00:00", timezone), builders); - addNTimes(2, IDX_DST_START, new DateTime("2012-03-25T01:00:00", timezone), builders); // day with dst shift, only 23h long - addNTimes(3, IDX_DST_START, new DateTime("2012-03-26T01:00:00", timezone), builders); - addNTimes(4, IDX_DST_START, new DateTime("2012-03-27T01:00:00", timezone), builders); + ZoneId timezone = ZoneId.of("CET"); + DateFormatter formatter = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss").withZone(timezone); + // epoch millis: 1332547200000 + addNTimes(1, IDX_DST_START, DateFormatters.toZonedDateTime(formatter.parse("2012-03-24T01:00:00")), builders); + // day with dst shift, only 23h long + addNTimes(2, IDX_DST_START, DateFormatters.toZonedDateTime(formatter.parse("2012-03-25T01:00:00")), builders); + addNTimes(3, IDX_DST_START, DateFormatters.toZonedDateTime(formatter.parse("2012-03-26T01:00:00")), builders); + addNTimes(4, IDX_DST_START, DateFormatters.toZonedDateTime(formatter.parse("2012-03-27T01:00:00")), builders); indexRandom(true, builders); ensureSearchable(); @@ -225,11 +223,23 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstStart() throws Excep List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(4)); - assertBucket(buckets.get(0), new DateTime("2012-03-24", timezone).toDateTime(DateTimeZone.UTC), 1L, nullValue(), null, null); - assertBucket(buckets.get(1), new DateTime("2012-03-25", timezone).toDateTime(DateTimeZone.UTC), 2L, notNullValue(), 1d, 1d / 24d); + DateFormatter dateFormatter = DateFormatter.forPattern("yyyy-MM-dd"); + ZonedDateTime expectedKeyFirstBucket = + LocalDate.from(dateFormatter.parse("2012-03-24")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(0), expectedKeyFirstBucket, 1L, nullValue(), null, null); + + ZonedDateTime expectedKeySecondBucket = + LocalDate.from(dateFormatter.parse("2012-03-25")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(1), expectedKeySecondBucket,2L, notNullValue(), 1d, 1d / 24d); + // the following is normalized using a 23h bucket width - assertBucket(buckets.get(2), new DateTime("2012-03-26", timezone).toDateTime(DateTimeZone.UTC), 3L, notNullValue(), 1d, 1d / 23d); - assertBucket(buckets.get(3), new DateTime("2012-03-27", timezone).toDateTime(DateTimeZone.UTC), 4L, notNullValue(), 1d, 1d / 24d); + ZonedDateTime expectedKeyThirdBucket = + LocalDate.from(dateFormatter.parse("2012-03-26")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(2), expectedKeyThirdBucket, 3L, notNullValue(), 1d, 1d / 23d); + + ZonedDateTime expectedKeyFourthBucket = + LocalDate.from(dateFormatter.parse("2012-03-27")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(3), expectedKeyFourthBucket, 4L, notNullValue(), 1d, 1d / 24d); } /** @@ -237,13 +247,15 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstStart() throws Excep */ public void testSingleValuedFieldNormalised_timeZone_CET_DstEnd() throws Exception { createIndex(IDX_DST_END); - DateTimeZone timezone = DateTimeZone.forID("CET"); + ZoneId timezone = ZoneId.of("CET"); List builders = new ArrayList<>(); - addNTimes(1, IDX_DST_END, new DateTime("2012-10-27T01:00:00", timezone), builders); - addNTimes(2, IDX_DST_END, new DateTime("2012-10-28T01:00:00", timezone), builders); // day with dst shift -1h, 25h long - addNTimes(3, IDX_DST_END, new DateTime("2012-10-29T01:00:00", timezone), builders); - addNTimes(4, IDX_DST_END, new DateTime("2012-10-30T01:00:00", timezone), builders); + DateFormatter formatter = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss").withZone(timezone); + addNTimes(1, IDX_DST_END, DateFormatters.toZonedDateTime(formatter.parse("2012-10-27T01:00:00")), builders); + // day with dst shift -1h, 25h long + addNTimes(2, IDX_DST_END, DateFormatters.toZonedDateTime(formatter.parse("2012-10-28T01:00:00")), builders); + addNTimes(3, IDX_DST_END, DateFormatters.toZonedDateTime(formatter.parse("2012-10-29T01:00:00")), builders); + addNTimes(4, IDX_DST_END, DateFormatters.toZonedDateTime(formatter.parse("2012-10-30T01:00:00")), builders); indexRandom(true, builders); ensureSearchable(); @@ -262,11 +274,24 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstEnd() throws Excepti List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(4)); - assertBucket(buckets.get(0), new DateTime("2012-10-27", timezone).toDateTime(DateTimeZone.UTC), 1L, nullValue(), null, null); - assertBucket(buckets.get(1), new DateTime("2012-10-28", timezone).toDateTime(DateTimeZone.UTC), 2L, notNullValue(), 1d, 1d / 24d); + DateFormatter dateFormatter = DateFormatter.forPattern("yyyy-MM-dd").withZone(ZoneOffset.UTC); + + ZonedDateTime expectedKeyFirstBucket = + LocalDate.from(dateFormatter.parse("2012-10-27")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(0), expectedKeyFirstBucket, 1L, nullValue(), null, null); + + ZonedDateTime expectedKeySecondBucket = + LocalDate.from(dateFormatter.parse("2012-10-28")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(1), expectedKeySecondBucket, 2L, notNullValue(), 1d, 1d / 24d); + // the following is normalized using a 25h bucket width - assertBucket(buckets.get(2), new DateTime("2012-10-29", timezone).toDateTime(DateTimeZone.UTC), 3L, notNullValue(), 1d, 1d / 25d); - assertBucket(buckets.get(3), new DateTime("2012-10-30", timezone).toDateTime(DateTimeZone.UTC), 4L, notNullValue(), 1d, 1d / 24d); + ZonedDateTime expectedKeyThirdBucket = + LocalDate.from(dateFormatter.parse("2012-10-29")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(2), expectedKeyThirdBucket, 3L, notNullValue(), 1d, 1d / 25d); + + ZonedDateTime expectedKeyFourthBucket = + LocalDate.from(dateFormatter.parse("2012-10-30")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(3), expectedKeyFourthBucket, 4L, notNullValue(), 1d, 1d / 24d); } /** @@ -275,14 +300,15 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstEnd() throws Excepti */ public void testSingleValuedFieldNormalised_timeZone_AsiaKathmandu() throws Exception { createIndex(IDX_DST_KATHMANDU); - DateTimeZone timezone = DateTimeZone.forID("Asia/Kathmandu"); + ZoneId timezone = ZoneId.of("Asia/Kathmandu"); List builders = new ArrayList<>(); - addNTimes(1, IDX_DST_KATHMANDU, new DateTime("1985-12-31T22:30:00", timezone), builders); + DateFormatter formatter = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss").withZone(timezone); + addNTimes(1, IDX_DST_KATHMANDU, DateFormatters.toZonedDateTime(formatter.parse("1985-12-31T22:30:00")), builders); // the shift happens during the next bucket, which includes the 45min that do not start on the full hour - addNTimes(2, IDX_DST_KATHMANDU, new DateTime("1985-12-31T23:30:00", timezone), builders); - addNTimes(3, IDX_DST_KATHMANDU, new DateTime("1986-01-01T01:30:00", timezone), builders); - addNTimes(4, IDX_DST_KATHMANDU, new DateTime("1986-01-01T02:30:00", timezone), builders); + addNTimes(2, IDX_DST_KATHMANDU, DateFormatters.toZonedDateTime(formatter.parse("1985-12-31T23:30:00")), builders); + addNTimes(3, IDX_DST_KATHMANDU, DateFormatters.toZonedDateTime(formatter.parse("1986-01-01T01:30:00")), builders); + addNTimes(4, IDX_DST_KATHMANDU, DateFormatters.toZonedDateTime(formatter.parse("1986-01-01T02:30:00")), builders); indexRandom(true, builders); ensureSearchable(); @@ -301,27 +327,36 @@ public void testSingleValuedFieldNormalised_timeZone_AsiaKathmandu() throws Exce List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(4)); - assertBucket(buckets.get(0), new DateTime("1985-12-31T22:00:00", timezone).toDateTime(DateTimeZone.UTC), 1L, nullValue(), null, - null); - assertBucket(buckets.get(1), new DateTime("1985-12-31T23:00:00", timezone).toDateTime(DateTimeZone.UTC), 2L, notNullValue(), 1d, - 1d / 60d); + DateFormatter dateFormatter = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss").withZone(ZoneOffset.UTC); + + ZonedDateTime expectedKeyFirstBucket = + LocalDateTime.from(dateFormatter.parse("1985-12-31T22:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(0), expectedKeyFirstBucket, 1L, nullValue(), null,null); + + ZonedDateTime expectedKeySecondBucket = + LocalDateTime.from(dateFormatter.parse("1985-12-31T23:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(1), expectedKeySecondBucket, 2L, notNullValue(), 1d,1d / 60d); + // the following is normalized using a 105min bucket width - assertBucket(buckets.get(2), new DateTime("1986-01-01T01:00:00", timezone).toDateTime(DateTimeZone.UTC), 3L, notNullValue(), 1d, - 1d / 105d); - assertBucket(buckets.get(3), new DateTime("1986-01-01T02:00:00", timezone).toDateTime(DateTimeZone.UTC), 4L, notNullValue(), 1d, - 1d / 60d); + ZonedDateTime expectedKeyThirdBucket = + LocalDateTime.from(dateFormatter.parse("1986-01-01T01:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(2), expectedKeyThirdBucket, 3L, notNullValue(), 1d,1d / 105d); + + ZonedDateTime expectedKeyFourthBucket = + LocalDateTime.from(dateFormatter.parse("1986-01-01T02:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(3), expectedKeyFourthBucket, 4L, notNullValue(), 1d,1d / 60d); } - private static void addNTimes(int amount, String index, DateTime dateTime, List builders) throws Exception { + private static void addNTimes(int amount, String index, ZonedDateTime dateTime, List builders) throws Exception { for (int i = 0; i < amount; i++) { builders.add(indexDoc(index, dateTime, 1)); } } - private static void assertBucket(Histogram.Bucket bucket, DateTime expectedKey, long expectedDocCount, + private static void assertBucket(Histogram.Bucket bucket, ZonedDateTime expectedKey, long expectedDocCount, Matcher derivativeMatcher, Double derivative, Double normalizedDerivative) { assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(expectedKey)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(expectedKey)); assertThat(bucket.getDocCount(), equalTo(expectedDocCount)); Derivative docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, derivativeMatcher); @@ -350,10 +385,10 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)histo).getProperty("_count"); Object[] propertiesCounts = (Object[]) ((InternalAggregation)histo).getProperty("sum.value"); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); Sum sum = bucket.getAggregations().get("sum"); @@ -361,14 +396,14 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { assertThat(sum.getValue(), equalTo(1.0)); SimpleValue deriv = bucket.getAggregations().get("deriv"); assertThat(deriv, nullValue()); - assertThat((DateTime) propertiesKeys[0], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[0], equalTo(key)); assertThat((long) propertiesDocCounts[0], equalTo(1L)); assertThat((double) propertiesCounts[0], equalTo(1.0)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); sum = bucket.getAggregations().get("sum"); @@ -379,14 +414,14 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { assertThat(deriv.value(), equalTo(4.0)); assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty( "histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(4.0)); - assertThat((DateTime) propertiesKeys[1], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[1], equalTo(key)); assertThat((long) propertiesDocCounts[1], equalTo(2L)); assertThat((double) propertiesCounts[1], equalTo(5.0)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); sum = bucket.getAggregations().get("sum"); @@ -397,7 +432,7 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { assertThat(deriv.value(), equalTo(10.0)); assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty( "histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(10.0)); - assertThat((DateTime) propertiesKeys[2], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[2], equalTo(key)); assertThat((long) propertiesDocCounts[2], equalTo(3L)); assertThat((double) propertiesCounts[2], equalTo(15.0)); } @@ -417,39 +452,39 @@ public void testMultiValuedField() throws Exception { List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(4)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(true)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, nullValue()); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(2.0)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0,ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(5L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(2.0)); - key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(3); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); @@ -487,29 +522,29 @@ public void testPartiallyUnmapped() throws Exception { List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(true)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, nullValue()); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(1.0)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java index 341364edbcf32..b4ae26d5f13df 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; @@ -160,7 +161,7 @@ public double execute(Map params, double[] values) { } private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis(); + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); } /** diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java index a5a3007818f7f..fd8c0d58caf9c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.Rounding; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalOrder; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java index 5007784a3d9a9..b929f222d94fe 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java @@ -23,9 +23,9 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; import org.elasticsearch.test.AbstractSerializingTestCase; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import static org.hamcrest.Matchers.equalTo; @@ -40,7 +40,7 @@ protected MultiValuesSourceFieldConfig doParseInstance(XContentParser parser) th protected MultiValuesSourceFieldConfig createTestInstance() { String field = randomAlphaOfLength(10); Object missing = randomBoolean() ? randomAlphaOfLength(10) : null; - DateTimeZone timeZone = randomBoolean() ? randomDateTimeZone() : null; + ZoneId timeZone = randomBoolean() ? randomZone() : null; return new MultiValuesSourceFieldConfig.Builder() .setFieldName(field).setMissing(missing).setScript(null).setTimeZone(timeZone).build(); } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 03e2a8e8248b9..e111abe0d5132 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -35,6 +34,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -2815,9 +2815,11 @@ public void testHighlightQueryRewriteDatesWithNow() throws Exception { .setSettings(Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", 2)) .get()); ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); - indexRandom(true, client().prepareIndex("index-1", "type", "1").setSource("d", now, "field", "hello world"), - client().prepareIndex("index-1", "type", "2").setSource("d", now.minusDays(1), "field", "hello"), - client().prepareIndex("index-1", "type", "3").setSource("d", now.minusDays(2), "field", "world")); + DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time"); + indexRandom(true, + client().prepareIndex("index-1", "type", "1").setSource("d", formatter.format(now), "field", "hello world"), + client().prepareIndex("index-1", "type", "2").setSource("d", formatter.format(now.minusDays(1)), "field", "hello"), + client().prepareIndex("index-1", "type", "3").setSource("d", formatter.format(now.minusDays(2)), "field", "world")); ensureSearchable("index-1"); for (int i = 0; i < 5; i++) { final SearchResponse r1 = client().prepareSearch("index-1") diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index 8a90ca0b8ca47..fc69df5987aff 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -28,9 +28,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.document.DocumentField; -import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -557,7 +556,7 @@ public void testStoredFieldsWithoutSource() throws Exception { .field("long_field", 4L) .field("float_field", 5.0f) .field("double_field", 6.0d) - .field("date_field", DateFormatters.forPattern("dateOptionalTime").format(date)) + .field("date_field", DateFormatter.forPattern("dateOptionalTime").format(date)) .field("boolean_field", true) .field("binary_field", Base64.getEncoder().encodeToString("testing text".getBytes("UTF-8"))) .endObject()).get(); @@ -589,7 +588,7 @@ public void testStoredFieldsWithoutSource() throws Exception { assertThat(searchHit.getFields().get("long_field").getValue(), equalTo((Object) 4L)); assertThat(searchHit.getFields().get("float_field").getValue(), equalTo((Object) 5.0f)); assertThat(searchHit.getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); - String dateTime = DateFormatters.forPattern("dateOptionalTime").format(date); + String dateTime = DateFormatter.forPattern("dateOptionalTime").format(date); assertThat(searchHit.getFields().get("date_field").getValue(), equalTo((Object) dateTime)); assertThat(searchHit.getFields().get("boolean_field").getValue(), equalTo((Object) Boolean.TRUE)); assertThat(searchHit.getFields().get("binary_field").getValue(), equalTo(new BytesArray("testing text" .getBytes("UTF8")))); @@ -769,7 +768,7 @@ public void testDocValueFields() throws Exception { .field("long_field", 4L) .field("float_field", 5.0f) .field("double_field", 6.0d) - .field("date_field", DateFormatters.forPattern("dateOptionalTime").format(date)) + .field("date_field", DateFormatter.forPattern("dateOptionalTime").format(date)) .field("boolean_field", true) .field("binary_field", new byte[] {42, 100}) .field("ip_field", "::1") @@ -869,7 +868,7 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); assertThat(searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatters.forPattern("dateOptionalTime").format(date))); + equalTo(DateFormatter.forPattern("dateOptionalTime").format(date))); assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); @@ -898,9 +897,8 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo("4.0")); assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo("5.0")); assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo("6.0")); - // TODO: switch to java date formatter, but will require special casing java 8 as there is a bug with epoch formatting there assertThat(searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(Joda.forPattern("epoch_millis").format(date))); + equalTo(DateFormatter.forPattern("epoch_millis").format(date))); } public void testScriptFields() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index 35beb10934e3d..3cb71f6be235c 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -395,6 +395,9 @@ public void testQueryRandomGeoCollection() throws Exception { geoShapeQueryBuilder.relation(ShapeRelation.INTERSECTS); SearchResponse result = client().prepareSearch("test").setQuery(geoShapeQueryBuilder).get(); assertSearchResponse(result); + assumeTrue("Skipping the check for the polygon with a degenerated dimension until " + +" https://issues.apache.org/jira/browse/LUCENE-8634 is fixed", + randomPoly.maxLat - randomPoly.minLat > 8.4e-8 && randomPoly.maxLon - randomPoly.minLon > 8.4e-8); assertHitCount(result, 1); } @@ -423,7 +426,8 @@ public void testRandomGeoCollectionQuery() throws Exception { } gcb.shape(new PolygonBuilder(cb)); - logger.info("Created Random GeometryCollection containing {} shapes", gcb.numShapes()); + logger.info("Created Random GeometryCollection containing {} shapes using {} tree", gcb.numShapes(), + usePrefixTrees ? "default" : "quadtree"); if (usePrefixTrees == false) { client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape") @@ -444,7 +448,11 @@ public void testRandomGeoCollectionQuery() throws Exception { geoShapeQueryBuilder.relation(ShapeRelation.INTERSECTS); SearchResponse result = client().prepareSearch("test").setQuery(geoShapeQueryBuilder).get(); assertSearchResponse(result); - assertTrue(result.getHits().getTotalHits().value > 0); + assumeTrue("Skipping the check for the polygon with a degenerated dimension until " + +" https://issues.apache.org/jira/browse/LUCENE-8634 is fixed", + randomPoly.maxLat - randomPoly.minLat > 8.4e-8 && randomPoly.maxLon - randomPoly.minLon > 8.4e-8); + assertTrue("query: " + geoShapeQueryBuilder.toString() + " doc: " + Strings.toString(docSource), + result.getHits().getTotalHits().value > 0); } /** tests querying a random geometry collection with a point */ diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 0c2b0829c5f75..b7d51798ab7df 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -25,9 +25,11 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -50,11 +52,10 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.ISODateTimeFormat; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; @@ -433,14 +434,14 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { "type", "past", "type=date" )); - DateTimeZone timeZone = randomDateTimeZone(); - String now = ISODateTimeFormat.dateTime().print(new DateTime(timeZone)); - logger.info(" --> Using time_zone [{}], now is [{}]", timeZone.getID(), now); + ZoneId timeZone = randomZone(); + String now = DateFormatter.forPattern("strict_date_optional_time").format(Instant.now().atZone(timeZone)); + logger.info(" --> Using time_zone [{}], now is [{}]", timeZone.getId(), now); client().prepareIndex("test", "type", "1").setSource("past", now).get(); refresh(); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("past:[now-1m/m TO now+1m/m]") - .timeZone(timeZone.getID())).get(); + .timeZone(timeZone.getId())).get(); assertHitCount(searchResponse, 1L); } @@ -1555,21 +1556,20 @@ public void testQueryStringWithSlopAndFields() { } } - public void testDateProvidedAsNumber() throws ExecutionException, InterruptedException { + public void testDateProvidedAsNumber() throws InterruptedException { createIndex("test"); assertAcked(client().admin().indices().preparePutMapping("test").setType("type") - .setSource("field", "type=date,format=epoch_millis").get()); - indexRandom(true, client().prepareIndex("test", "type", "1").setSource("field", -1000000000001L), - client().prepareIndex("test", "type", "2").setSource("field", -1000000000000L), - client().prepareIndex("test", "type", "3").setSource("field", -999999999999L), - client().prepareIndex("test", "type", "4").setSource("field", -1000000000001.0123456789), - client().prepareIndex("test", "type", "5").setSource("field", -1000000000000.0123456789), - client().prepareIndex("test", "type", "6").setSource("field", -999999999999.0123456789)); - - - assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").lte(-1000000000000L)).get(), 4); - assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").lte(-999999999999L)).get(), 6); - + .setSource("field", "type=date,format=epoch_millis").get()); + indexRandom(true, + client().prepareIndex("test", "type", "1").setSource("field", 1000000000001L), + client().prepareIndex("test", "type", "2").setSource("field", 1000000000000L), + client().prepareIndex("test", "type", "3").setSource("field", 999999999999L), + client().prepareIndex("test", "type", "4").setSource("field", 1000000000002L), + client().prepareIndex("test", "type", "5").setSource("field", 1000000000003L), + client().prepareIndex("test", "type", "6").setSource("field", 999999999999L)); + + assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").gte(1000000000000L)).get(), 4); + assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").gte(999999999999L)).get(), 6); } public void testRangeQueryWithTimeZone() throws Exception { @@ -1582,7 +1582,7 @@ public void testRangeQueryWithTimeZone() throws Exception { client().prepareIndex("test", "type1", "3").setSource("date", "2014-01-01T01:00:00", "num", 3), // Now in UTC+1 client().prepareIndex("test", "type1", "4") - .setSource("date", DateTime.now(DateTimeZone.forOffsetHours(1)).getMillis(), "num", 4)); + .setSource("date", Instant.now().atZone(ZoneOffset.ofHours(1)).toInstant().toEpochMilli(), "num", 4)); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")) @@ -1634,12 +1634,6 @@ public void testRangeQueryWithTimeZone() throws Exception { assertHitCount(searchResponse, 1L); assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); - // When we use long values, it means we have ms since epoch UTC based so we don't apply any transformation - expectThrows(SearchPhaseExecutionException.class, () -> - client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("date").from(1388534400000L).to(1388537940999L).timeZone("+01:00")) - .get()); - searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00")) .get(); @@ -1653,6 +1647,36 @@ public void testRangeQueryWithTimeZone() throws Exception { assertThat(searchResponse.getHits().getAt(0).getId(), is("4")); } + public void testRangeQueryWithLocaleMapping() throws Exception { + assumeTrue("need java 9 for testing ",JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0); + + assertAcked(prepareCreate("test") + .addMapping("type1", jsonBuilder().startObject().startObject("properties").startObject("date_field") + .field("type", "date") + .field("format", "E, d MMM yyyy HH:mm:ss Z") + .field("locale", "de") + .endObject().endObject().endObject())); + + indexRandom(true, + client().prepareIndex("test", "type1", "1").setSource("date_field", "Mi., 06 Dez. 2000 02:55:00 -0800"), + client().prepareIndex("test", "type1", "2").setSource("date_field", "Do., 07 Dez. 2000 02:55:00 -0800") + ); + + SearchResponse searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("date_field") + .gte("Di., 05 Dez. 2000 02:55:00 -0800") + .lte("Do., 07 Dez. 2000 00:00:00 -0800")) + .get(); + assertHitCount(searchResponse, 1L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("date_field") + .gte("Di., 05 Dez. 2000 02:55:00 -0800") + .lte("Fr., 08 Dez. 2000 00:00:00 -0800")) + .get(); + assertHitCount(searchResponse, 2L); + } + public void testSearchEmptyDoc() { assertAcked(prepareCreate("test").setSettings("{\"index.analysis.analyzer.default.type\":\"keyword\"}", XContentType.JSON)); client().prepareIndex("test", "type1", "1").setSource("{}", XContentType.JSON).get(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java index b20db45894bec..8b750939238cb 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java @@ -24,11 +24,13 @@ import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; @@ -45,13 +47,14 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; import org.elasticsearch.cluster.coordination.CoordinationState; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.CoordinatorTests; import org.elasticsearch.cluster.coordination.DeterministicTaskQueue; import org.elasticsearch.cluster.coordination.InMemoryPersistedState; import org.elasticsearch.cluster.coordination.MockSinglePrioritizingExecutor; +import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -197,6 +200,53 @@ public void testSuccessfulSnapshot() { assertEquals(0, snapshotInfo.failedShards()); } + public void testConcurrentSnapshotCreateAndDelete() { + setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + + TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + final AtomicBoolean createdSnapshot = new AtomicBoolean(); + masterNode.client.admin().cluster().preparePutRepository(repoName) + .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute( + assertNoFailureListener( + () -> masterNode.client.admin().indices().create( + new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL).settings( + Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)), + assertNoFailureListener( + () -> masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .execute(assertNoFailureListener( + () -> masterNode.client.admin().cluster().deleteSnapshot( + new DeleteSnapshotRequest(repoName, snapshotName), + assertNoFailureListener(() -> masterNode.client.admin().cluster() + .prepareCreateSnapshot(repoName, snapshotName).execute( + assertNoFailureListener(() -> createdSnapshot.set(true)) + ))))))))); + + deterministicTaskQueue.runAllRunnableTasks(); + + assertTrue(createdSnapshot.get()); + SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); + final Repository repository = masterNode.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + + final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); + assertEquals(shards, snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + } + private void startCluster() { final ClusterState initialClusterState = new ClusterState.Builder(ClusterName.DEFAULT).nodes(testClusterNodes.randomDiscoveryNodes()).build(); @@ -205,14 +255,10 @@ private void startCluster() { deterministicTaskQueue.advanceTime(); deterministicTaskQueue.runAllRunnableTasks(); - final BootstrapConfiguration bootstrapConfiguration = new BootstrapConfiguration( - testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()) - .map(node -> new BootstrapConfiguration.NodeDescription(node.node)) - .distinct() - .collect(Collectors.toList())); + final VotingConfiguration votingConfiguration = new VotingConfiguration(testClusterNodes.nodes.values().stream().map(n -> n.node) + .filter(DiscoveryNode::isMasterNode).map(DiscoveryNode::getId).collect(Collectors.toSet())); testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()).forEach( - testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(bootstrapConfiguration) - ); + testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(votingConfiguration)); runUntil( () -> { @@ -529,6 +575,11 @@ allocationService, new AliasValidator(), environment, indexScopedSettings, transportService, clusterService, threadPool, snapshotsService, actionFilters, indexNameExpressionResolver )); + actions.put(DeleteSnapshotAction.INSTANCE, + new TransportDeleteSnapshotAction( + transportService, clusterService, threadPool, + snapshotsService, actionFilters, indexNameExpressionResolver + )); client.initialize(actions, () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); } diff --git a/server/src/test/java/org/elasticsearch/transport/CompressibleBytesOutputStreamTests.java b/server/src/test/java/org/elasticsearch/transport/CompressibleBytesOutputStreamTests.java index 58dc2a1e55b47..aeb92dac73479 100644 --- a/server/src/test/java/org/elasticsearch/transport/CompressibleBytesOutputStreamTests.java +++ b/server/src/test/java/org/elasticsearch/transport/CompressibleBytesOutputStreamTests.java @@ -39,6 +39,8 @@ public void testStreamWithoutCompression() throws IOException { stream.write(expectedBytes); BytesReference bytesRef = stream.materializeBytes(); + // Closing compression stream does not close underlying stream + stream.close(); assertFalse(CompressorFactory.COMPRESSOR.isCompressed(bytesRef)); @@ -48,7 +50,8 @@ public void testStreamWithoutCompression() throws IOException { assertEquals(-1, streamInput.read()); assertArrayEquals(expectedBytes, actualBytes); - stream.close(); + + bStream.close(); // The bytes should be zeroed out on close for (byte b : bytesRef.toBytesRef().bytes) { @@ -64,6 +67,7 @@ public void testStreamWithCompression() throws IOException { stream.write(expectedBytes); BytesReference bytesRef = stream.materializeBytes(); + stream.close(); assertTrue(CompressorFactory.COMPRESSOR.isCompressed(bytesRef)); @@ -73,7 +77,8 @@ public void testStreamWithCompression() throws IOException { assertEquals(-1, streamInput.read()); assertArrayEquals(expectedBytes, actualBytes); - stream.close(); + + bStream.close(); // The bytes should be zeroed out on close for (byte b : bytesRef.toBytesRef().bytes) { diff --git a/server/src/test/java/org/elasticsearch/transport/InboundMessageTests.java b/server/src/test/java/org/elasticsearch/transport/InboundMessageTests.java new file mode 100644 index 0000000000000..499b6586543ed --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/InboundMessageTests.java @@ -0,0 +1,228 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; + +public class InboundMessageTests extends ESTestCase { + + private final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + private final NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList()); + + public void testReadRequest() throws IOException { + String[] features = {"feature1", "feature2"}; + String value = randomAlphaOfLength(10); + Message message = new Message(value); + String action = randomAlphaOfLength(10); + long requestId = randomLong(); + boolean isHandshake = randomBoolean(); + boolean compress = randomBoolean(); + threadContext.putHeader("header", "header_value"); + Version version = randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()); + OutboundMessage.Request request = new OutboundMessage.Request(threadContext, features, message, version, action, requestId, + isHandshake, compress); + BytesReference reference; + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + reference = request.serialize(streamOutput); + } + // Check that the thread context is not deleted. + assertEquals("header_value", threadContext.getHeader("header")); + + threadContext.stashContext(); + threadContext.putHeader("header", "header_value2"); + + InboundMessage.Reader reader = new InboundMessage.Reader(version, registry, threadContext); + BytesReference sliced = reference.slice(6, reference.length() - 6); + InboundMessage.RequestMessage inboundMessage = (InboundMessage.RequestMessage) reader.deserialize(sliced); + // Check that deserialize does not overwrite current thread context. + assertEquals("header_value2", threadContext.getHeader("header")); + inboundMessage.getStoredContext().restore(); + assertEquals("header_value", threadContext.getHeader("header")); + assertEquals(isHandshake, inboundMessage.isHandshake()); + assertEquals(compress, inboundMessage.isCompress()); + assertEquals(version, inboundMessage.getVersion()); + assertEquals(action, inboundMessage.getActionName()); + assertEquals(new HashSet<>(Arrays.asList(features)), inboundMessage.getFeatures()); + assertTrue(inboundMessage.isRequest()); + assertFalse(inboundMessage.isResponse()); + assertFalse(inboundMessage.isError()); + assertEquals(value, new Message(inboundMessage.getStreamInput()).value); + } + + public void testReadResponse() throws IOException { + HashSet features = new HashSet<>(Arrays.asList("feature1", "feature2")); + String value = randomAlphaOfLength(10); + Message message = new Message(value); + long requestId = randomLong(); + boolean isHandshake = randomBoolean(); + boolean compress = randomBoolean(); + threadContext.putHeader("header", "header_value"); + Version version = randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()); + OutboundMessage.Response request = new OutboundMessage.Response(threadContext, features, message, version, requestId, isHandshake, + compress); + BytesReference reference; + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + reference = request.serialize(streamOutput); + } + // Check that the thread context is not deleted. + assertEquals("header_value", threadContext.getHeader("header")); + + threadContext.stashContext(); + threadContext.putHeader("header", "header_value2"); + + InboundMessage.Reader reader = new InboundMessage.Reader(version, registry, threadContext); + BytesReference sliced = reference.slice(6, reference.length() - 6); + InboundMessage.ResponseMessage inboundMessage = (InboundMessage.ResponseMessage) reader.deserialize(sliced); + // Check that deserialize does not overwrite current thread context. + assertEquals("header_value2", threadContext.getHeader("header")); + inboundMessage.getStoredContext().restore(); + assertEquals("header_value", threadContext.getHeader("header")); + assertEquals(isHandshake, inboundMessage.isHandshake()); + assertEquals(compress, inboundMessage.isCompress()); + assertEquals(version, inboundMessage.getVersion()); + assertTrue(inboundMessage.isResponse()); + assertFalse(inboundMessage.isRequest()); + assertFalse(inboundMessage.isError()); + assertEquals(value, new Message(inboundMessage.getStreamInput()).value); + } + + public void testReadErrorResponse() throws IOException { + HashSet features = new HashSet<>(Arrays.asList("feature1", "feature2")); + RemoteTransportException exception = new RemoteTransportException("error", new IOException()); + long requestId = randomLong(); + boolean isHandshake = randomBoolean(); + boolean compress = randomBoolean(); + threadContext.putHeader("header", "header_value"); + Version version = randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()); + OutboundMessage.Response request = new OutboundMessage.Response(threadContext, features, exception, version, requestId, + isHandshake, compress); + BytesReference reference; + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + reference = request.serialize(streamOutput); + } + // Check that the thread context is not deleted. + assertEquals("header_value", threadContext.getHeader("header")); + + threadContext.stashContext(); + threadContext.putHeader("header", "header_value2"); + + InboundMessage.Reader reader = new InboundMessage.Reader(version, registry, threadContext); + BytesReference sliced = reference.slice(6, reference.length() - 6); + InboundMessage.ResponseMessage inboundMessage = (InboundMessage.ResponseMessage) reader.deserialize(sliced); + // Check that deserialize does not overwrite current thread context. + assertEquals("header_value2", threadContext.getHeader("header")); + inboundMessage.getStoredContext().restore(); + assertEquals("header_value", threadContext.getHeader("header")); + assertEquals(isHandshake, inboundMessage.isHandshake()); + assertEquals(compress, inboundMessage.isCompress()); + assertEquals(version, inboundMessage.getVersion()); + assertTrue(inboundMessage.isResponse()); + assertFalse(inboundMessage.isRequest()); + assertTrue(inboundMessage.isError()); + assertEquals("[error]", inboundMessage.getStreamInput().readException().getMessage()); + } + + public void testEnsureVersionCompatibility() throws IOException { + testVersionIncompatibility(VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), + Version.CURRENT), Version.CURRENT, randomBoolean()); + + final Version version = Version.fromString("7.0.0"); + testVersionIncompatibility(Version.fromString("6.0.0"), version, true); + IllegalStateException ise = expectThrows(IllegalStateException.class, () -> + testVersionIncompatibility(Version.fromString("6.0.0"), version, false)); + assertEquals("Received message from unsupported version: [6.0.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); + + // For handshake we are compatible with N-2 + testVersionIncompatibility(Version.fromString("5.6.0"), version, true); + ise = expectThrows(IllegalStateException.class, () -> + testVersionIncompatibility(Version.fromString("5.6.0"), version, false)); + assertEquals("Received message from unsupported version: [5.6.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); + + ise = expectThrows(IllegalStateException.class, () -> + testVersionIncompatibility(Version.fromString("2.3.0"), version, true)); + assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); + + ise = expectThrows(IllegalStateException.class, () -> + testVersionIncompatibility(Version.fromString("2.3.0"), version, false)); + assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); + } + + private void testVersionIncompatibility(Version version, Version currentVersion, boolean isHandshake) throws IOException { + String[] features = {}; + String value = randomAlphaOfLength(10); + Message message = new Message(value); + String action = randomAlphaOfLength(10); + long requestId = randomLong(); + boolean compress = randomBoolean(); + OutboundMessage.Request request = new OutboundMessage.Request(threadContext, features, message, version, action, requestId, + isHandshake, compress); + BytesReference reference; + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + reference = request.serialize(streamOutput); + } + + BytesReference sliced = reference.slice(6, reference.length() - 6); + InboundMessage.Reader reader = new InboundMessage.Reader(currentVersion, registry, threadContext); + reader.deserialize(sliced); + } + + private static final class Message extends TransportMessage { + + public String value; + + private Message() { + } + + private Message(StreamInput in) throws IOException { + readFrom(in); + } + + private Message(String value) { + this.value = value; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + value = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java new file mode 100644 index 0000000000000..01e391a30a732 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -0,0 +1,184 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashSet; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +public class OutboundHandlerTests extends ESTestCase { + + private final TestThreadPool threadPool = new TestThreadPool(getClass().getName()); + private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + private OutboundHandler handler; + private FakeTcpChannel fakeTcpChannel; + + @Before + public void setUp() throws Exception { + super.setUp(); + TransportLogger transportLogger = new TransportLogger(); + fakeTcpChannel = new FakeTcpChannel(randomBoolean()); + handler = new OutboundHandler(threadPool, BigArrays.NON_RECYCLING_INSTANCE, transportLogger); + } + + @After + public void tearDown() throws Exception { + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + super.tearDown(); + } + + public void testSendRawBytes() { + BytesArray bytesArray = new BytesArray("message".getBytes(StandardCharsets.UTF_8)); + + AtomicBoolean isSuccess = new AtomicBoolean(false); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap((v) -> isSuccess.set(true), exception::set); + handler.sendBytes(fakeTcpChannel, bytesArray, listener); + + BytesReference reference = fakeTcpChannel.getMessageCaptor().get(); + ActionListener sendListener = fakeTcpChannel.getListenerCaptor().get(); + if (randomBoolean()) { + sendListener.onResponse(null); + assertTrue(isSuccess.get()); + assertNull(exception.get()); + } else { + IOException e = new IOException("failed"); + sendListener.onFailure(e); + assertFalse(isSuccess.get()); + assertSame(e, exception.get()); + } + + assertEquals(bytesArray, reference); + } + + public void testSendMessage() throws IOException { + OutboundMessage message; + ThreadContext threadContext = threadPool.getThreadContext(); + Version version = Version.CURRENT; + String actionName = "handshake"; + long requestId = randomLongBetween(0, 300); + boolean isHandshake = randomBoolean(); + boolean compress = randomBoolean(); + String value = "message"; + threadContext.putHeader("header", "header_value"); + Writeable writeable = new Message(value); + + boolean isRequest = randomBoolean(); + if (isRequest) { + message = new OutboundMessage.Request(threadContext, new String[0], writeable, version, actionName, requestId, isHandshake, + compress); + } else { + message = new OutboundMessage.Response(threadContext, new HashSet<>(), writeable, version, requestId, isHandshake, compress); + } + + AtomicBoolean isSuccess = new AtomicBoolean(false); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap((v) -> isSuccess.set(true), exception::set); + handler.sendMessage(fakeTcpChannel, message, listener); + + BytesReference reference = fakeTcpChannel.getMessageCaptor().get(); + ActionListener sendListener = fakeTcpChannel.getListenerCaptor().get(); + if (randomBoolean()) { + sendListener.onResponse(null); + assertTrue(isSuccess.get()); + assertNull(exception.get()); + } else { + IOException e = new IOException("failed"); + sendListener.onFailure(e); + assertFalse(isSuccess.get()); + assertSame(e, exception.get()); + } + + InboundMessage.Reader reader = new InboundMessage.Reader(Version.CURRENT, namedWriteableRegistry, threadPool.getThreadContext()); + try (InboundMessage inboundMessage = reader.deserialize(reference.slice(6, reference.length() - 6))) { + assertEquals(version, inboundMessage.getVersion()); + assertEquals(requestId, inboundMessage.getRequestId()); + if (isRequest) { + assertTrue(inboundMessage.isRequest()); + assertFalse(inboundMessage.isResponse()); + } else { + assertTrue(inboundMessage.isResponse()); + assertFalse(inboundMessage.isRequest()); + } + if (isHandshake) { + assertTrue(inboundMessage.isHandshake()); + } else { + assertFalse(inboundMessage.isHandshake()); + } + if (compress) { + assertTrue(inboundMessage.isCompress()); + } else { + assertFalse(inboundMessage.isCompress()); + } + Message readMessage = new Message(); + readMessage.readFrom(inboundMessage.getStreamInput()); + assertEquals(value, readMessage.value); + + try (ThreadContext.StoredContext existing = threadContext.stashContext()) { + ThreadContext.StoredContext storedContext = inboundMessage.getStoredContext(); + assertNull(threadContext.getHeader("header")); + storedContext.restore(); + assertEquals("header_value", threadContext.getHeader("header")); + } + } + } + + private static final class Message extends TransportMessage { + + public String value; + + private Message() { + } + + private Message(String value) { + this.value = value; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + value = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java new file mode 100644 index 0000000000000..1a6eaff9e5a2e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +public class RemoteClusterAwareClientTests extends ESTestCase { + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + private MockTransportService startTransport(String id, List knownNodes) { + return RemoteClusterConnectionTests.startTransport(id, knownNodes, Version.CURRENT, threadPool); + } + + public void testSearchShards() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes)) { + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); + try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + + try (RemoteClusterAwareClient client = new RemoteClusterAwareClient(Settings.EMPTY, threadPool, service, "cluster1")) { + SearchRequest request = new SearchRequest("test-index"); + CountDownLatch responseLatch = new CountDownLatch(1); + AtomicReference reference = new AtomicReference<>(); + ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") + .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) + .routing(request.routing()); + client.admin().cluster().searchShards(searchShardsRequest, + new LatchedActionListener<>(ActionListener.wrap(reference::set, e -> fail("no failures expected")), responseLatch)); + responseLatch.await(); + assertNotNull(reference.get()); + ClusterSearchShardsResponse clusterSearchShardsResponse = reference.get(); + assertEquals(knownNodes, Arrays.asList(clusterSearchShardsResponse.getNodes())); + } + } + } + } + + public void testSearchShardsThreadContextHeader() { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes)) { + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); + try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + + try (RemoteClusterAwareClient client = new RemoteClusterAwareClient(Settings.EMPTY, threadPool, service, "cluster1")) { + SearchRequest request = new SearchRequest("test-index"); + int numThreads = 10; + ExecutorService executorService = Executors.newFixedThreadPool(numThreads); + for (int i = 0; i < numThreads; i++) { + final String threadId = Integer.toString(i); + executorService.submit(() -> { + ThreadContext threadContext = seedTransport.threadPool.getThreadContext(); + threadContext.putHeader("threadId", threadId); + AtomicReference reference = new AtomicReference<>(); + final ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") + .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) + .routing(request.routing()); + CountDownLatch responseLatch = new CountDownLatch(1); + client.admin().cluster().searchShards(searchShardsRequest, + new LatchedActionListener<>(ActionListener.wrap( + resp -> { + reference.set(resp); + assertEquals(threadId, seedTransport.threadPool.getThreadContext().getHeader("threadId")); + }, + e -> fail("no failures expected")), responseLatch)); + try { + responseLatch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + assertNotNull(reference.get()); + ClusterSearchShardsResponse clusterSearchShardsResponse = reference.get(); + assertEquals(knownNodes, Arrays.asList(clusterSearchShardsResponse.getNodes())); + }); + } + ThreadPool.terminate(executorService, 5, TimeUnit.SECONDS); + } + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 02e701ed4bc86..308d330d54f61 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -43,7 +42,6 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CancellableThreads; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.core.internal.io.IOUtils; @@ -558,7 +556,7 @@ public void run() { } } - private List>> seedNodes(final DiscoveryNode... seedNodes) { + private static List>> seedNodes(final DiscoveryNode... seedNodes) { if (seedNodes.length == 0) { return Collections.emptyList(); } else if (seedNodes.length == 1) { @@ -570,205 +568,6 @@ private List>> seedNodes(final DiscoveryNo } } - public void testFetchShards() throws Exception { - List knownNodes = new CopyOnWriteArrayList<>(); - try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); - MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); - knownNodes.add(seedTransport.getLocalDiscoNode()); - knownNodes.add(discoverableTransport.getLocalDiscoNode()); - Collections.shuffle(knownNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { - service.start(); - service.acceptIncomingRequests(); - final List>> seedNodes = seedNodes(seedNode); - try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { - if (randomBoolean()) { - updateSeedNodes(connection, seedNodes); - } - if (randomBoolean()) { - connection.updateSkipUnavailable(randomBoolean()); - } - SearchRequest request = new SearchRequest("test-index"); - CountDownLatch responseLatch = new CountDownLatch(1); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") - .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) - .routing(request.routing()); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); - responseLatch.await(); - assertNull(failReference.get()); - assertNotNull(reference.get()); - ClusterSearchShardsResponse clusterSearchShardsResponse = reference.get(); - assertEquals(knownNodes, Arrays.asList(clusterSearchShardsResponse.getNodes())); - assertTrue(connection.assertNoRunningConnections()); - } - } - } - } - - public void testFetchShardsThreadContextHeader() throws Exception { - List knownNodes = new CopyOnWriteArrayList<>(); - try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); - MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); - knownNodes.add(seedTransport.getLocalDiscoNode()); - knownNodes.add(discoverableTransport.getLocalDiscoNode()); - Collections.shuffle(knownNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { - service.start(); - service.acceptIncomingRequests(); - final List>> seedNodes = seedNodes(seedNode); - try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { - SearchRequest request = new SearchRequest("test-index"); - Thread[] threads = new Thread[10]; - for (int i = 0; i < threads.length; i++) { - final String threadId = Integer.toString(i); - threads[i] = new Thread(() -> { - ThreadContext threadContext = seedTransport.threadPool.getThreadContext(); - threadContext.putHeader("threadId", threadId); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - final ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") - .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) - .routing(request.routing()); - CountDownLatch responseLatch = new CountDownLatch(1); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap( - resp -> { - reference.set(resp); - assertEquals(threadId, seedTransport.threadPool.getThreadContext().getHeader("threadId")); - }, - failReference::set), responseLatch)); - try { - responseLatch.await(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - assertNull(failReference.get()); - assertNotNull(reference.get()); - ClusterSearchShardsResponse clusterSearchShardsResponse = reference.get(); - assertEquals(knownNodes, Arrays.asList(clusterSearchShardsResponse.getNodes())); - }); - } - for (int i = 0; i < threads.length; i++) { - threads[i].start(); - } - - for (int i = 0; i < threads.length; i++) { - threads[i].join(); - } - assertTrue(connection.assertNoRunningConnections()); - } - } - } - } - - public void testFetchShardsSkipUnavailable() throws Exception { - List knownNodes = new CopyOnWriteArrayList<>(); - try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT)) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); - knownNodes.add(seedNode); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { - service.start(); - service.acceptIncomingRequests(); - try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { - ConnectionManager connectionManager = connection.getConnectionManager(); - - SearchRequest request = new SearchRequest("test-index"); - ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") - .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) - .routing(request.routing()); - { - CountDownLatch responseLatch = new CountDownLatch(1); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); - assertTrue(responseLatch.await(10, TimeUnit.SECONDS)); - assertNull(failReference.get()); - assertNotNull(reference.get()); - ClusterSearchShardsResponse response = reference.get(); - assertTrue(response != ClusterSearchShardsResponse.EMPTY); - assertEquals(knownNodes, Arrays.asList(response.getNodes())); - } - - CountDownLatch disconnectedLatch = new CountDownLatch(1); - connectionManager.addListener(new TransportConnectionListener() { - @Override - public void onNodeDisconnected(DiscoveryNode node) { - if (node.equals(seedNode)) { - disconnectedLatch.countDown(); - } - } - }); - - service.addFailToSendNoConnectRule(seedTransport); - - if (randomBoolean()) { - connection.updateSkipUnavailable(false); - } - { - CountDownLatch responseLatch = new CountDownLatch(1); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap((s) -> { - reference.set(s); - }, failReference::set), responseLatch)); - assertTrue(responseLatch.await(10, TimeUnit.SECONDS)); - assertNotNull(failReference.get()); - assertNull(reference.get()); - assertThat(failReference.get(), instanceOf(TransportException.class)); - } - - connection.updateSkipUnavailable(true); - { - CountDownLatch responseLatch = new CountDownLatch(1); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); - assertTrue(responseLatch.await(10, TimeUnit.SECONDS)); - assertNull(failReference.get()); - assertNotNull(reference.get()); - ClusterSearchShardsResponse response = reference.get(); - assertTrue(response == ClusterSearchShardsResponse.EMPTY); - } - - //give transport service enough time to realize that the node is down, and to notify the connection listeners - //so that RemoteClusterConnection is left with no connected nodes, hence it will retry connecting next - assertTrue(disconnectedLatch.await(10, TimeUnit.SECONDS)); - - if (randomBoolean()) { - connection.updateSkipUnavailable(false); - } - - service.clearAllRules(); - //check that we reconnect once the node is back up - { - CountDownLatch responseLatch = new CountDownLatch(1); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); - assertTrue(responseLatch.await(10, TimeUnit.SECONDS)); - assertNull(failReference.get()); - assertNotNull(reference.get()); - ClusterSearchShardsResponse response = reference.get(); - assertTrue(response != ClusterSearchShardsResponse.EMPTY); - assertEquals(knownNodes, Arrays.asList(response.getNodes())); - } - } - } - } - } - public void testTriggerUpdatesConcurrently() throws IOException, InterruptedException { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index d5671eec21961..60f3ece86bcbe 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -20,9 +20,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; @@ -33,7 +31,6 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; @@ -60,9 +57,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch; import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -711,172 +706,6 @@ public void onFailure(Exception e) { } } - public void testCollectSearchShards() throws Exception { - int numClusters = randomIntBetween(2, 10); - MockTransportService[] mockTransportServices = new MockTransportService[numClusters]; - DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; - Map remoteIndicesByCluster = new HashMap<>(); - Settings.Builder builder = Settings.builder(); - for (int i = 0; i < numClusters; i++) { - List knownNodes = new CopyOnWriteArrayList<>(); - MockTransportService remoteSeedTransport = startTransport("node_remote" + i, knownNodes, Version.CURRENT); - mockTransportServices[i] = remoteSeedTransport; - DiscoveryNode remoteSeedNode = remoteSeedTransport.getLocalDiscoNode(); - knownNodes.add(remoteSeedNode); - nodes[i] = remoteSeedNode; - builder.put("cluster.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString()); - remoteIndicesByCluster.put("remote" + i, new OriginalIndices(new String[]{"index"}, IndicesOptions.lenientExpandOpen())); - } - Settings settings = builder.build(); - - try { - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { - service.start(); - service.acceptIncomingRequests(); - try (RemoteClusterService remoteClusterService = new RemoteClusterService(settings, service)) { - assertFalse(remoteClusterService.isCrossClusterSearchEnabled()); - remoteClusterService.initializeRemoteClusters(); - assertTrue(remoteClusterService.isCrossClusterSearchEnabled()); - { - final CountDownLatch latch = new CountDownLatch(1); - AtomicReference> response = new AtomicReference<>(); - AtomicReference failure = new AtomicReference<>(); - remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, remoteIndicesByCluster, - new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch)); - awaitLatch(latch, 5, TimeUnit.SECONDS); - assertNull(failure.get()); - assertNotNull(response.get()); - Map map = response.get(); - assertEquals(numClusters, map.size()); - for (int i = 0; i < numClusters; i++) { - String clusterAlias = "remote" + i; - assertTrue(map.containsKey(clusterAlias)); - ClusterSearchShardsResponse shardsResponse = map.get(clusterAlias); - assertEquals(1, shardsResponse.getNodes().length); - } - } - { - final CountDownLatch latch = new CountDownLatch(1); - AtomicReference> response = new AtomicReference<>(); - AtomicReference failure = new AtomicReference<>(); - remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), "index_not_found", - null, remoteIndicesByCluster, - new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch)); - awaitLatch(latch, 5, TimeUnit.SECONDS); - assertNull(response.get()); - assertNotNull(failure.get()); - assertThat(failure.get(), instanceOf(RemoteTransportException.class)); - RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); - assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); - } - int numDisconnectedClusters = randomIntBetween(1, numClusters); - Set disconnectedNodes = new HashSet<>(numDisconnectedClusters); - Set disconnectedNodesIndices = new HashSet<>(numDisconnectedClusters); - while(disconnectedNodes.size() < numDisconnectedClusters) { - int i = randomIntBetween(0, numClusters - 1); - if (disconnectedNodes.add(nodes[i])) { - assertTrue(disconnectedNodesIndices.add(i)); - } - } - - CountDownLatch disconnectedLatch = new CountDownLatch(numDisconnectedClusters); - for (RemoteClusterConnection connection : remoteClusterService.getConnections()) { - connection.getConnectionManager().addListener(new TransportConnectionListener() { - @Override - public void onNodeDisconnected(DiscoveryNode node) { - if (disconnectedNodes.remove(node)) { - disconnectedLatch.countDown(); - } - } - }); - } - - for (DiscoveryNode disconnectedNode : disconnectedNodes) { - service.addFailToSendNoConnectRule(disconnectedNode.getAddress()); - } - - { - final CountDownLatch latch = new CountDownLatch(1); - AtomicReference> response = new AtomicReference<>(); - AtomicReference failure = new AtomicReference<>(); - remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, remoteIndicesByCluster, - new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch)); - awaitLatch(latch, 5, TimeUnit.SECONDS); - assertNull(response.get()); - assertNotNull(failure.get()); - assertThat(failure.get(), instanceOf(RemoteTransportException.class)); - assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster [")); - assertThat(failure.get().getCause(), instanceOf(NodeDisconnectedException.class)); - } - - //setting skip_unavailable to true for all the disconnected clusters will make the request succeed again - for (int i : disconnectedNodesIndices) { - remoteClusterService.updateSkipUnavailable("remote" + i, true); - } - { - final CountDownLatch latch = new CountDownLatch(1); - AtomicReference> response = new AtomicReference<>(); - AtomicReference failure = new AtomicReference<>(); - remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, remoteIndicesByCluster, - new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch)); - awaitLatch(latch, 5, TimeUnit.SECONDS); - assertNull(failure.get()); - assertNotNull(response.get()); - Map map = response.get(); - assertEquals(numClusters, map.size()); - for (int i = 0; i < numClusters; i++) { - String clusterAlias = "remote" + i; - assertTrue(map.containsKey(clusterAlias)); - ClusterSearchShardsResponse shardsResponse = map.get(clusterAlias); - if (disconnectedNodesIndices.contains(i)) { - assertTrue(shardsResponse == ClusterSearchShardsResponse.EMPTY); - } else { - assertTrue(shardsResponse != ClusterSearchShardsResponse.EMPTY); - } - } - } - - //give transport service enough time to realize that the node is down, and to notify the connection listeners - //so that RemoteClusterConnection is left with no connected nodes, hence it will retry connecting next - assertTrue(disconnectedLatch.await(5, TimeUnit.SECONDS)); - - service.clearAllRules(); - if (randomBoolean()) { - for (int i : disconnectedNodesIndices) { - if (randomBoolean()) { - remoteClusterService.updateSkipUnavailable("remote" + i, true); - } - - } - } - { - final CountDownLatch latch = new CountDownLatch(1); - AtomicReference> response = new AtomicReference<>(); - AtomicReference failure = new AtomicReference<>(); - remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, remoteIndicesByCluster, - new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch)); - awaitLatch(latch, 5, TimeUnit.SECONDS); - assertNull(failure.get()); - assertNotNull(response.get()); - Map map = response.get(); - assertEquals(numClusters, map.size()); - for (int i = 0; i < numClusters; i++) { - String clusterAlias = "remote" + i; - assertTrue(map.containsKey(clusterAlias)); - ClusterSearchShardsResponse shardsResponse = map.get(clusterAlias); - assertNotSame(ClusterSearchShardsResponse.EMPTY, shardsResponse); - } - } - assertEquals(0, service.getConnectionManager().size()); - } - } - } finally { - for (MockTransportService mockTransportService : mockTransportServices) { - mockTransportService.close(); - } - } - } - public void testRemoteClusterSkipIfDisconnectedSetting() { { Settings settings = Settings.builder() @@ -1079,7 +908,7 @@ public void testRemoteClusterWithProxy() throws Exception { } } - private void updateRemoteCluster(RemoteClusterService service, String clusterAlias, List addresses, String proxyAddress) + private static void updateRemoteCluster(RemoteClusterService service, String clusterAlias, List addresses, String proxyAddress) throws Exception { CountDownLatch latch = new CountDownLatch(1); AtomicReference exceptionAtomicReference = new AtomicReference<>(); @@ -1093,4 +922,40 @@ private void updateRemoteCluster(RemoteClusterService service, String clusterAli throw exceptionAtomicReference.get(); } } + + public static void updateSkipUnavailable(RemoteClusterService service, String clusterAlias, boolean skipUnavailable) { + RemoteClusterConnection connection = service.getRemoteClusterConnection(clusterAlias); + connection.updateSkipUnavailable(skipUnavailable); + } + + public static void addConnectionListener(RemoteClusterService service, TransportConnectionListener listener) { + for (RemoteClusterConnection connection : service.getConnections()) { + ConnectionManager connectionManager = connection.getConnectionManager(); + connectionManager.addListener(listener); + } + } + + public void testSkipUnavailable() { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT)) { + DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); + knownNodes.add(seedNode); + Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); + try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + + assertFalse(service.getRemoteClusterService().isSkipUnavailable("cluster1")); + + if (randomBoolean()) { + updateSkipUnavailable(service.getRemoteClusterService(), "cluster1", false); + assertFalse(service.getRemoteClusterService().isSkipUnavailable("cluster1")); + } + + updateSkipUnavailable(service.getRemoteClusterService(), "cluster1", true); + assertTrue(service.getRemoteClusterService().isSkipUnavailable("cluster1")); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index f3294366b8fe1..a25ac2a551a22 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -37,7 +37,6 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -158,41 +157,12 @@ public void testAddressLimit() throws Exception { assertEquals(102, addresses[2].getPort()); } - public void testEnsureVersionCompatibility() { - TcpTransport.ensureVersionCompatibility(VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), - Version.CURRENT), Version.CURRENT, randomBoolean()); - - final Version version = Version.fromString("7.0.0"); - TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), version, true); - IllegalStateException ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), version, false)); - assertEquals("Received message from unsupported version: [6.0.0] minimal compatible version is: [" - + version.minimumCompatibilityVersion() + "]", ise.getMessage()); - - // For handshake we are compatible with N-2 - TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), version, true); - ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), version, false)); - assertEquals("Received message from unsupported version: [5.6.0] minimal compatible version is: [" - + version.minimumCompatibilityVersion() + "]", ise.getMessage()); - - ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), version, true)); - assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [" - + version.minimumCompatibilityVersion() + "]", ise.getMessage()); - - ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), version, false)); - assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [" - + version.minimumCompatibilityVersion() + "]", ise.getMessage()); - } - @SuppressForbidden(reason = "Allow accessing localhost") public void testCompressRequestAndResponse() throws IOException { final boolean compressed = randomBoolean(); Req request = new Req(randomRealisticUnicodeOfLengthBetween(10, 100)); ThreadPool threadPool = new TestThreadPool(TcpTransportTests.class.getName()); - AtomicReference requestCaptor = new AtomicReference<>(); + AtomicReference messageCaptor = new AtomicReference<>(); try { TcpTransport transport = new TcpTransport("test", Settings.EMPTY, Version.CURRENT, threadPool, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), null, null) { @@ -204,7 +174,7 @@ protected FakeServerChannel bind(String name, InetSocketAddress address) throws @Override protected FakeTcpChannel initiateChannel(DiscoveryNode node) throws IOException { - return new FakeTcpChannel(false, requestCaptor); + return new FakeTcpChannel(false); } @Override @@ -219,7 +189,7 @@ public Releasable openConnection(DiscoveryNode node, ConnectionProfile profile, int numConnections = profile.getNumConnections(); ArrayList fakeChannels = new ArrayList<>(numConnections); for (int i = 0; i < numConnections; ++i) { - fakeChannels.add(new FakeTcpChannel(false, requestCaptor)); + fakeChannels.add(new FakeTcpChannel(false, messageCaptor)); } listener.onResponse(new NodeChannels(node, fakeChannels, profile, Version.CURRENT)); return () -> CloseableChannel.closeChannels(fakeChannels, false); @@ -241,12 +211,12 @@ public Releasable openConnection(DiscoveryNode node, ConnectionProfile profile, (request1, channel, task) -> channel.sendResponse(TransportResponse.Empty.INSTANCE), ThreadPool.Names.SAME, true, true)); - BytesReference reference = requestCaptor.get(); + BytesReference reference = messageCaptor.get(); assertNotNull(reference); AtomicReference responseCaptor = new AtomicReference<>(); InetSocketAddress address = new InetSocketAddress(InetAddress.getLocalHost(), 0); - FakeTcpChannel responseChannel = new FakeTcpChannel(true, address, address, responseCaptor); + FakeTcpChannel responseChannel = new FakeTcpChannel(true, address, address, "profile", responseCaptor); transport.messageReceived(reference.slice(6, reference.length() - 6), responseChannel); diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index 5d96cd37b054d..6ec2732aaf915 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -145,6 +145,9 @@ public static SearchSourceBuilder randomSearchSourceBuilder( if (randomBoolean()) { builder.version(randomBoolean()); } + if (randomBoolean()) { + builder.seqNoAndPrimaryTerm(randomBoolean()); + } if (randomBoolean()) { builder.trackScores(randomBoolean()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java index 22ed586043d83..58107cbc7318a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.util.Date; import java.util.function.Predicate; import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; @@ -101,4 +102,10 @@ protected boolean assertToXContentEquivalence() { return true; } + /** + * @return a random date between 1970 and ca 2065 + */ + protected Date randomDate() { + return new Date(randomLongBetween(0, 3000000000000L)); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 012c574f9e6aa..55e356d093e6e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1727,8 +1727,16 @@ private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback) removeExclusions(excludedNodeIds); - nodeAndClient.recreateNode(newSettings, () -> rebuildUnicastHostFiles(emptyList())); - nodeAndClient.startNode(); + boolean success = false; + try { + nodeAndClient.recreateNode(newSettings, () -> rebuildUnicastHostFiles(emptyList())); + nodeAndClient.startNode(); + success = true; + } finally { + if (success == false) + nodes.remove(nodeAndClient.name); + } + if (activeDisruptionScheme != null) { activeDisruptionScheme.applyToNode(nodeAndClient.name, this); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index 18edb5ec3790a..6a17f65790368 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -504,6 +504,16 @@ public boolean version() { public void version(boolean version) { } + @Override + public boolean seqNoAndPrimaryTerm() { + return false; + } + + @Override + public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + + } + @Override public int[] docIdsToLoad() { return new int[0]; diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 43542d48a6c39..87343d4c82087 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -2038,11 +2038,14 @@ public void testTcpHandshake() { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) { @Override - protected String handleRequest(TcpChannel mockChannel, String profileName, StreamInput stream, long requestId, - int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) + protected void handleRequest(TcpChannel channel, InboundMessage.RequestMessage request, int messageLengthBytes) throws IOException { - return super.handleRequest(mockChannel, profileName, stream, requestId, messageLengthBytes, version, remoteAddress, - (byte) (status & ~(1 << 3))); // we flip the isHandshake bit back and act like the handler is not found + // we flip the isHandshake bit back and act like the handler is not found + byte status = (byte) (request.status & ~(1 << 3)); + Version version = request.getVersion(); + InboundMessage.RequestMessage nonHandshakeRequest = new InboundMessage.RequestMessage(request.threadContext, version, + status, request.getRequestId(), request.getActionName(), request.getFeatures(), request.getStreamInput()); + super.handleRequest(channel, nonHandshakeRequest, messageLengthBytes); } }; diff --git a/test/framework/src/main/java/org/elasticsearch/transport/FakeTcpChannel.java b/test/framework/src/main/java/org/elasticsearch/transport/FakeTcpChannel.java index 63cacfbb093a8..bb392554305c0 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/FakeTcpChannel.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/FakeTcpChannel.java @@ -31,9 +31,10 @@ public class FakeTcpChannel implements TcpChannel { private final InetSocketAddress localAddress; private final InetSocketAddress remoteAddress; private final String profile; - private final AtomicReference messageCaptor; private final ChannelStats stats = new ChannelStats(); private final CompletableContext closeContext = new CompletableContext<>(); + private final AtomicReference messageCaptor; + private final AtomicReference> listenerCaptor; public FakeTcpChannel() { this(false, "profile", new AtomicReference<>()); @@ -47,11 +48,6 @@ public FakeTcpChannel(boolean isServer, AtomicReference messageC this(isServer, "profile", messageCaptor); } - public FakeTcpChannel(boolean isServer, InetSocketAddress localAddress, InetSocketAddress remoteAddress, - AtomicReference messageCaptor) { - this(isServer, localAddress, remoteAddress,"profile", messageCaptor); - } - public FakeTcpChannel(boolean isServer, String profile, AtomicReference messageCaptor) { this(isServer, null, null, profile, messageCaptor); @@ -64,6 +60,7 @@ public FakeTcpChannel(boolean isServer, InetSocketAddress localAddress, InetSock this.remoteAddress = remoteAddress; this.profile = profile; this.messageCaptor = messageCaptor; + this.listenerCaptor = new AtomicReference<>(); } @Override @@ -89,6 +86,7 @@ public InetSocketAddress getRemoteAddress() { @Override public void sendMessage(BytesReference reference, ActionListener listener) { messageCaptor.set(reference); + listenerCaptor.set(listener); } @Override @@ -115,4 +113,12 @@ public boolean isOpen() { public ChannelStats getChannelStats() { return stats; } + + public AtomicReference getMessageCaptor() { + return messageCaptor; + } + + public AtomicReference> getListenerCaptor() { + return listenerCaptor; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index cbadcfa1f38ac..a7dbce45d3c5d 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -37,7 +37,7 @@ import org.elasticsearch.nio.BytesWriteHandler; import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; -import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioSelectorGroup; import org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; @@ -69,7 +69,7 @@ public class MockNioTransport extends TcpTransport { private static final Logger logger = LogManager.getLogger(MockNioTransport.class); private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); - private volatile NioGroup nioGroup; + private volatile NioSelectorGroup nioGroup; private volatile MockTcpChannelFactory clientChannelFactory; public MockNioTransport(Settings settings, Version version, ThreadPool threadPool, NetworkService networkService, @@ -94,7 +94,7 @@ protected MockSocketChannel initiateChannel(DiscoveryNode node) throws IOExcepti protected void doStart() { boolean success = false; try { - nioGroup = new NioGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2, + nioGroup = new NioSelectorGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2, (s) -> new TestingSocketEventHandler(this::onNonChannelException, s)); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); @@ -160,6 +160,11 @@ protected ConnectionProfile maybeOverrideConnectionProfile(ConnectionProfile con return builder.build(); } + private void onNonChannelException(Exception exception) { + logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), + exception); + } + private void exceptionCaught(NioSocketChannel channel, Exception exception) { onException((TcpChannel) channel, exception); } diff --git a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc index aca6fc27b1dd8..b9b6d44fd69b7 100644 --- a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc @@ -54,7 +54,7 @@ added to an index directly as part of the index creation: [source,shell] ------------------------------------------------------------------------------- -PUT /2015?include_type_name=true +PUT /2015 { "aliases" : { "current_year" : {} diff --git a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java index 3f88ce13f697d..476ba8d26bb0a 100644 --- a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java +++ b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.nio.file.Path; +import java.time.ZoneOffset; import java.util.UUID; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; @@ -34,9 +35,8 @@ public class TestUtils { public static final String PUBLIC_KEY_RESOURCE = "/public.key"; public static final String PRIVATE_KEY_RESOURCE = "/private.key"; - private static final DateFormatter formatDateTimeFormatter = - DateFormatter.forPattern("yyyy-MM-dd"); - private static final DateMathParser dateMathParser = formatDateTimeFormatter.toDateMathParser(); + private static final DateFormatter dateFormatter = DateFormatter.forPattern("yyyy-MM-dd"); + private static final DateMathParser dateMathParser = dateFormatter.toDateMathParser(); public static String dumpLicense(License license) throws Exception { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); @@ -49,11 +49,11 @@ public static String dumpLicense(License license) throws Exception { } public static String dateMathString(String time, final long now) { - return formatDateTimeFormatter.formatMillis(dateMathParser.parse(time, () -> now)); + return dateFormatter.format(dateMathParser.parse(time, () -> now).atZone(ZoneOffset.UTC)); } public static long dateMath(String time, final long now) { - return dateMathParser.parse(time, () -> now); + return dateMathParser.parse(time, () -> now).toEpochMilli(); } public static LicenseSpec generateRandomLicenseSpec(int version) { diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index b9cd464241fa2..c16a80ff37d8b 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -141,7 +141,7 @@ integTestCluster { setting 'xpack.license.self_generated.type', 'trial' keystoreSetting 'bootstrap.password', 'x-pack-test-password' keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - distribution = 'zip' // this is important since we use the reindex module in ML + distribution = 'default' // this is important since we use the reindex module in ML setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'x_pack_rest_user', '-p', 'x-pack-test-password', '-r', 'superuser' diff --git a/x-pack/plugin/ccr/qa/rest/build.gradle b/x-pack/plugin/ccr/qa/rest/build.gradle index 38873262ed273..c890064504b51 100644 --- a/x-pack/plugin/ccr/qa/rest/build.gradle +++ b/x-pack/plugin/ccr/qa/rest/build.gradle @@ -12,7 +12,7 @@ task restTest(type: RestIntegTestTask) { } restTestCluster { - distribution 'zip' + distribution 'default' // Disable assertions in FollowingEngineAssertions, otherwise an AssertionError is thrown before // indexing a document directly in a follower index. In a rest test we like to test the exception // that is thrown in production when indexing a document directly in a follower index. diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml index aa63c804aba21..5b3e6c18ef29b 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml @@ -43,6 +43,12 @@ - is_true: follow_index_shards_acked - is_true: index_following_started + - do: + ccr.follow_stats: + index: _all + - length: { indices: 1 } + - match: { indices.0.index: "bar" } + # we can not reliably wait for replication to occur so we test the endpoint without indexing any documents - do: ccr.follow_stats: @@ -77,3 +83,7 @@ index: bar - is_true: acknowledged + - do: + catch: missing + ccr.follow_stats: + index: unknown diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index ab0f995803762..8ab9e396b4dc1 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -179,8 +179,7 @@ public List> getPersistentTasksExecutor(ClusterServic ThreadPool threadPool, Client client, SettingsModule settingsModule) { - IndexScopedSettings indexScopedSettings = settingsModule.getIndexScopedSettings(); - return Collections.singletonList(new ShardFollowTasksExecutor(client, threadPool, clusterService, indexScopedSettings)); + return Collections.singletonList(new ShardFollowTasksExecutor(client, threadPool, clusterService, settingsModule)); } public List> getActions() { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java index 26089ab46952d..d6262a7711dad 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java @@ -30,11 +30,17 @@ public final class CcrSettings { Setting.boolSetting("index.xpack.ccr.following_index", false, Property.IndexScope, Property.InternalIndex); /** - * Dynamic node setting for specifying the wait_for_timeout that the auto follow coordinator should be using. + * Dynamic node setting for specifying the wait_for_timeout that the auto follow coordinator and shard follow task should be using. */ - public static final Setting CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT = Setting.timeSetting( - "ccr.auto_follow.wait_for_metadata_timeout", TimeValue.timeValueSeconds(60), Property.NodeScope, Property.Dynamic); + public static final Setting CCR_WAIT_FOR_METADATA_TIMEOUT = Setting.timeSetting( + "ccr.wait_for_metadata_timeout", TimeValue.timeValueSeconds(60), Property.NodeScope, Property.Dynamic); + /** + * Dynamic node setting for specifying the wait_for_timeout that the auto follow coordinator should be using. + * TODO: Deprecate and remove this setting + */ + private static final Setting CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT = Setting.timeSetting( + "ccr.auto_follow.wait_for_metadata_timeout", CCR_WAIT_FOR_METADATA_TIMEOUT, Property.NodeScope, Property.Dynamic); /** * Max bytes a node can recover per second. @@ -62,7 +68,8 @@ static List> getSettings() { CCR_FOLLOWING_INDEX_SETTING, RECOVERY_MAX_BYTES_PER_SECOND, INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, - CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT); + CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT, + CCR_WAIT_FOR_METADATA_TIMEOUT); } private final CombinedRateLimiter ccrRateLimiter; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index e3b008efc5657..19d2b9ce4797e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -113,8 +113,8 @@ protected boolean removeEldestEntry(final Map.Entry { + updateMapping(0L, followerMappingVersion -> { synchronized (ShardFollowNodeTask.this) { currentMappingVersion = followerMappingVersion; } @@ -370,7 +370,7 @@ private synchronized void handleWriteResponse(final BulkShardOperationsResponse coordinateReads(); } - private synchronized void maybeUpdateMapping(Long minimumRequiredMappingVersion, Runnable task) { + private synchronized void maybeUpdateMapping(long minimumRequiredMappingVersion, Runnable task) { if (currentMappingVersion >= minimumRequiredMappingVersion) { LOGGER.trace("{} mapping version [{}] is higher or equal than minimum required mapping version [{}]", params.getFollowShardId(), currentMappingVersion, minimumRequiredMappingVersion); @@ -378,7 +378,7 @@ private synchronized void maybeUpdateMapping(Long minimumRequiredMappingVersion, } else { LOGGER.trace("{} updating mapping, mapping version [{}] is lower than minimum required mapping version [{}]", params.getFollowShardId(), currentMappingVersion, minimumRequiredMappingVersion); - updateMapping(mappingVersion -> { + updateMapping(minimumRequiredMappingVersion, mappingVersion -> { currentMappingVersion = mappingVersion; task.run(); }); @@ -400,12 +400,13 @@ private synchronized void maybeUpdateSettings(final Long minimumRequiredSettings } } - private void updateMapping(LongConsumer handler) { - updateMapping(handler, new AtomicInteger(0)); + private void updateMapping(long minRequiredMappingVersion, LongConsumer handler) { + updateMapping(minRequiredMappingVersion, handler, new AtomicInteger(0)); } - private void updateMapping(LongConsumer handler, AtomicInteger retryCounter) { - innerUpdateMapping(handler, e -> handleFailure(e, retryCounter, () -> updateMapping(handler, retryCounter))); + private void updateMapping(long minRequiredMappingVersion, LongConsumer handler, AtomicInteger retryCounter) { + innerUpdateMapping(minRequiredMappingVersion, handler, + e -> handleFailure(e, retryCounter, () -> updateMapping(minRequiredMappingVersion, handler, retryCounter))); } private void updateSettings(final LongConsumer handler) { @@ -418,12 +419,15 @@ private void updateSettings(final LongConsumer handler, final AtomicInteger retr private void handleFailure(Exception e, AtomicInteger retryCounter, Runnable task) { assert e != null; - if (shouldRetry(params.getRemoteCluster(), e) && isStopped() == false) { - int currentRetry = retryCounter.incrementAndGet(); - LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying [{}]", - params.getFollowShardId(), currentRetry), e); - long delay = computeDelay(currentRetry, params.getReadPollTimeout().getMillis()); - scheduler.accept(TimeValue.timeValueMillis(delay), task); + if (shouldRetry(params.getRemoteCluster(), e)) { + if (isStopped() == false) { + // Only retry is the shard follow task is not stopped. + int currentRetry = retryCounter.incrementAndGet(); + LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying [{}]", + params.getFollowShardId(), currentRetry), e); + long delay = computeDelay(currentRetry, params.getReadPollTimeout().getMillis()); + scheduler.accept(TimeValue.timeValueMillis(delay), task); + } } else { fatalException = ExceptionsHelper.convertToElastic(e); LOGGER.warn("shard follow task encounter non-retryable error", e); @@ -468,7 +472,7 @@ static boolean shouldRetry(String remoteCluster, Exception e) { } // These methods are protected for testing purposes: - protected abstract void innerUpdateMapping(LongConsumer handler, Consumer errorHandler); + protected abstract void innerUpdateMapping(long minRequiredMappingVersion, LongConsumer handler, Consumer errorHandler); protected abstract void innerUpdateSettings(LongConsumer handler, Consumer errorHandler); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 97308126ffb3f..956171ba9b7c3 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -24,11 +24,13 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.index.Index; @@ -46,6 +48,7 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsRequest; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; @@ -69,16 +72,20 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor this.waitForMetadataTimeOut = newVal); } @Override @@ -112,33 +119,25 @@ protected AllocatedPersistentTask createTask(long id, String type, String action scheduler, System::nanoTime) { @Override - protected void innerUpdateMapping(LongConsumer handler, Consumer errorHandler) { - Index leaderIndex = params.getLeaderShardId().getIndex(); - Index followIndex = params.getFollowShardId().getIndex(); - - ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest(leaderIndex.getName()); - CheckedConsumer onResponse = clusterStateResponse -> { - IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().getIndexSafe(leaderIndex); - if (indexMetaData.getMappings().isEmpty()) { - assert indexMetaData.getMappingVersion() == 1; - handler.accept(indexMetaData.getMappingVersion()); - return; - } - - assert indexMetaData.getMappings().size() == 1 : "expected exactly one mapping, but got [" + - indexMetaData.getMappings().size() + "]"; - MappingMetaData mappingMetaData = indexMetaData.getMappings().iterator().next().value; - - PutMappingRequest putMappingRequest = CcrRequests.putMappingRequest(followIndex.getName(), mappingMetaData); - followerClient.admin().indices().putMapping(putMappingRequest, ActionListener.wrap( - putMappingResponse -> handler.accept(indexMetaData.getMappingVersion()), - errorHandler)); - }; - try { - remoteClient(params).admin().cluster().state(clusterStateRequest, ActionListener.wrap(onResponse, errorHandler)); - } catch (Exception e) { - errorHandler.accept(e); - } + protected void innerUpdateMapping(long minRequiredMappingVersion, LongConsumer handler, Consumer errorHandler) { + final Index followerIndex = params.getFollowShardId().getIndex(); + getIndexMetadata(minRequiredMappingVersion, 0L, params, ActionListener.wrap( + indexMetaData -> { + if (indexMetaData.getMappings().isEmpty()) { + assert indexMetaData.getMappingVersion() == 1; + handler.accept(indexMetaData.getMappingVersion()); + return; + } + assert indexMetaData.getMappings().size() == 1 : "expected exactly one mapping, but got [" + + indexMetaData.getMappings().size() + "]"; + MappingMetaData mappingMetaData = indexMetaData.getMappings().iterator().next().value; + PutMappingRequest putMappingRequest = CcrRequests.putMappingRequest(followerIndex.getName(), mappingMetaData); + followerClient.admin().indices().putMapping(putMappingRequest, ActionListener.wrap( + putMappingResponse -> handler.accept(indexMetaData.getMappingVersion()), + errorHandler)); + }, + errorHandler + )); } @Override @@ -257,6 +256,39 @@ private Client remoteClient(ShardFollowTask params) { return wrapClient(client.getRemoteClusterClient(params.getRemoteCluster()), params.getHeaders()); } + private void getIndexMetadata(long minRequiredMappingVersion, long minRequiredMetadataVersion, + ShardFollowTask params, ActionListener listener) { + final Index leaderIndex = params.getLeaderShardId().getIndex(); + final ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest(leaderIndex.getName()); + if (minRequiredMetadataVersion > 0) { + clusterStateRequest.waitForMetaDataVersion(minRequiredMetadataVersion).waitForTimeout(waitForMetadataTimeOut); + } + try { + remoteClient(params).admin().cluster().state(clusterStateRequest, ActionListener.wrap( + r -> { + // if wait_for_metadata_version timeout, the response is empty + if (r.getState() == null) { + assert minRequiredMetadataVersion > 0; + getIndexMetadata(minRequiredMappingVersion, minRequiredMetadataVersion, params, listener); + return; + } + final MetaData metaData = r.getState().metaData(); + final IndexMetaData indexMetaData = metaData.getIndexSafe(leaderIndex); + if (indexMetaData.getMappingVersion() < minRequiredMappingVersion) { + // ask for the next version. + getIndexMetadata(minRequiredMappingVersion, metaData.version() + 1, params, listener); + } else { + assert metaData.version() >= minRequiredMetadataVersion : metaData.version() + " < " + minRequiredMetadataVersion; + listener.onResponse(indexMetaData); + } + }, + listener::onFailure + )); + } catch (Exception e) { + listener.onFailure(e); + } + } + interface FollowerStatsInfoHandler { void accept(String followerHistoryUUID, long globalCheckpoint, long maxSeqNo); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java index 8ab66aec8e80b..dc684fbc904ce 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; @@ -13,6 +14,7 @@ import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -65,6 +67,15 @@ protected void doExecute( listener.onFailure(LicenseUtils.newComplianceException("ccr")); return; } + + if (Strings.isAllOrWildcard(request.indices()) == false) { + final ClusterState state = clusterService.state(); + Set shardFollowTaskFollowerIndices = findFollowerIndicesFromShardFollowTasks(state, request.indices()); + if (shardFollowTaskFollowerIndices.isEmpty()) { + String resources = String.join(",", request.indices()); + throw new ResourceNotFoundException("No shard follow tasks for follower indices [{}]", resources); + } + } super.doExecute(task, request, listener); } @@ -80,21 +91,7 @@ protected FollowStatsAction.StatsResponses newResponse( @Override protected void processTasks(final FollowStatsAction.StatsRequest request, final Consumer operation) { final ClusterState state = clusterService.state(); - final PersistentTasksCustomMetaData persistentTasksMetaData = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); - if (persistentTasksMetaData == null) { - return; - } - - final Set requestedFollowerIndices = request.indices() != null ? - new HashSet<>(Arrays.asList(request.indices())) : Collections.emptySet(); - final Set followerIndices = persistentTasksMetaData.tasks().stream() - .filter(persistentTask -> persistentTask.getTaskName().equals(ShardFollowTask.NAME)) - .map(persistentTask -> { - ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTask.getParams(); - return shardFollowTask.getFollowShardId().getIndexName(); - }) - .filter(followerIndex -> requestedFollowerIndices.isEmpty() || requestedFollowerIndices.contains(followerIndex)) - .collect(Collectors.toSet()); + final Set followerIndices = findFollowerIndicesFromShardFollowTasks(state, request.indices()); for (final Task task : taskManager.getTasks().values()) { if (task instanceof ShardFollowNodeTask) { @@ -114,4 +111,22 @@ protected void taskOperation( listener.onResponse(new FollowStatsAction.StatsResponse(task.getStatus())); } + static Set findFollowerIndicesFromShardFollowTasks(ClusterState state, String[] indices) { + final PersistentTasksCustomMetaData persistentTasksMetaData = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); + if (persistentTasksMetaData == null) { + return Collections.emptySet(); + } + + final Set requestedFollowerIndices = indices != null ? + new HashSet<>(Arrays.asList(indices)) : Collections.emptySet(); + return persistentTasksMetaData.tasks().stream() + .filter(persistentTask -> persistentTask.getTaskName().equals(ShardFollowTask.NAME)) + .map(persistentTask -> { + ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTask.getParams(); + return shardFollowTask.getFollowShardId().getIndexName(); + }) + .filter(followerIndex -> Strings.isAllOrWildcard(indices) || requestedFollowerIndices.contains(followerIndex)) + .collect(Collectors.toSet()); + } + } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java index 1c7f9f95adbbe..a72b2f21d71df 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java @@ -16,8 +16,10 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.CombinedRateLimiter; import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.KeyedLock; @@ -42,6 +44,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.Consumer; +import java.util.function.LongConsumer; public class CcrRestoreSourceService extends AbstractLifecycleComponent implements IndexEventListener { @@ -52,6 +55,7 @@ public class CcrRestoreSourceService extends AbstractLifecycleComponent implemen private final CopyOnWriteArrayList> closeSessionListeners = new CopyOnWriteArrayList<>(); private final ThreadPool threadPool; private final CcrSettings ccrSettings; + private final CounterMetric throttleTime = new CounterMetric(); public CcrRestoreSourceService(ThreadPool threadPool, CcrSettings ccrSettings) { this.threadPool = threadPool; @@ -136,7 +140,7 @@ public synchronized SessionReader getSessionReader(String sessionUUID) { throw new IllegalArgumentException("session [" + sessionUUID + "] not found"); } restore.idle = false; - return new SessionReader(restore); + return new SessionReader(restore, ccrSettings, throttleTime::inc); } private void internalCloseSession(String sessionUUID, boolean throwIfSessionMissing) { @@ -182,6 +186,10 @@ private void maybeTimeout(String sessionUUID) { } } + public long getThrottleTime() { + return this.throttleTime.count(); + } + private static class RestoreSession extends AbstractRefCounted { private final String sessionUUID; @@ -254,9 +262,13 @@ protected void closeInternal() { public static class SessionReader implements Closeable { private final RestoreSession restoreSession; + private final CcrSettings ccrSettings; + private final LongConsumer throttleListener; - private SessionReader(RestoreSession restoreSession) { + private SessionReader(RestoreSession restoreSession, CcrSettings ccrSettings, LongConsumer throttleListener) { this.restoreSession = restoreSession; + this.ccrSettings = ccrSettings; + this.throttleListener = throttleListener; restoreSession.incRef(); } @@ -270,6 +282,9 @@ private SessionReader(RestoreSession restoreSession) { * @throws IOException if the read fails */ public long readFileBytes(String fileName, BytesReference reference) throws IOException { + CombinedRateLimiter rateLimiter = ccrSettings.getRateLimiter(); + long throttleTime = rateLimiter.maybePause(reference.length()); + throttleListener.accept(throttleTime); return restoreSession.readFileBytes(fileName, reference); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 7af3d690e3a99..65fd80325e716 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -201,7 +201,7 @@ private NodeConfigurationSource createNodeConfigurationSource(String leaderSeedA builder.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); // Let cluster state api return quickly in order to speed up auto follow tests: - builder.put(CcrSettings.CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT.getKey(), TimeValue.timeValueMillis(100)); + builder.put(CcrSettings.CCR_WAIT_FOR_METADATA_TIMEOUT.getKey(), TimeValue.timeValueMillis(100)); if (configureRemoteClusterViaNodeSettings() && leaderSeedAddress != null) { builder.put("cluster.remote.leader_cluster.seeds", leaderSeedAddress); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java index e77a672f1fddb..ad8f545fa9dc0 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java @@ -46,7 +46,7 @@ protected Settings nodeSettings() { builder.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); // Let cluster state api return quickly in order to speed up auto follow tests: - builder.put(CcrSettings.CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT.getKey(), TimeValue.timeValueMillis(100)); + builder.put(CcrSettings.CCR_WAIT_FOR_METADATA_TIMEOUT.getKey(), TimeValue.timeValueMillis(100)); return builder.build(); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index 47cc1c528fa5c..0a3669734dc6b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -238,9 +238,15 @@ public void testDocsAreRecovered() throws Exception { } public void testRateLimitingIsEmployed() throws Exception { + boolean followerRateLimiting = randomBoolean(); + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getKey(), "10K")); - assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + if (followerRateLimiting) { + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } else { + assertAcked(leaderClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster"; String leaderIndex = "index1"; @@ -256,11 +262,15 @@ public void testRateLimitingIsEmployed() throws Exception { final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); List repositories = new ArrayList<>(); + List restoreSources = new ArrayList<>(); for (RepositoriesService repositoriesService : getFollowerCluster().getDataOrMasterNodeInstances(RepositoriesService.class)) { Repository repository = repositoriesService.repository(leaderClusterRepoName); repositories.add((CcrRepository) repository); } + for (CcrRestoreSourceService restoreSource : getLeaderCluster().getDataOrMasterNodeInstances(CcrRestoreSourceService.class)) { + restoreSources.add(restoreSource); + } logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { @@ -282,12 +292,20 @@ public void testRateLimitingIsEmployed() throws Exception { restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); future.actionGet(); - assertTrue(repositories.stream().anyMatch(cr -> cr.getRestoreThrottleTimeInNanos() > 0)); + if (followerRateLimiting) { + assertTrue(repositories.stream().anyMatch(cr -> cr.getRestoreThrottleTimeInNanos() > 0)); + } else { + assertTrue(restoreSources.stream().anyMatch(cr -> cr.getThrottleTime() > 0)); + } settingsRequest = new ClusterUpdateSettingsRequest(); ByteSizeValue defaultValue = CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getDefault(Settings.EMPTY); settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getKey(), defaultValue)); - assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + if (followerRateLimiting) { + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } else { + assertAcked(leaderClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } } public void testFollowerMappingIsUpdated() throws IOException { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java index 409746f9d851b..1f1c6cd5c64e3 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java @@ -6,9 +6,12 @@ package org.elasticsearch.xpack.ccr; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -116,4 +119,106 @@ public void testFollowStatsApiFollowerIndexFiltering() throws Exception { }); } + public void testFollowStatsApiResourceNotFound() throws Exception { + FollowStatsAction.StatsRequest statsRequest = new FollowStatsAction.StatsRequest(); + FollowStatsAction.StatsResponses response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(0)); + + statsRequest.setIndices(new String[] {"follower1"}); + Exception e = expectThrows(ResourceNotFoundException.class, + () -> client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet()); + assertThat(e.getMessage(), equalTo("No shard follow tasks for follower indices [follower1]")); + + final String leaderIndexSettings = getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader1"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest.setIndices(new String[] {"follower2"}); + e = expectThrows(ResourceNotFoundException.class, + () -> client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet()); + assertThat(e.getMessage(), equalTo("No shard follow tasks for follower indices [follower2]")); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + } + + public void testFollowStatsApiIncludeShardFollowStatsWithRemovedFollowerIndex() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader1"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + FollowStatsAction.StatsRequest statsRequest = new FollowStatsAction.StatsRequest(); + FollowStatsAction.StatsResponses response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().admin().indices().delete(new DeleteIndexRequest("follower1")).actionGet()); + + statsRequest = new FollowStatsAction.StatsRequest(); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + } + + public void testFollowStatsApiIncludeShardFollowStatsWithClosedFollowerIndex() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader1"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + FollowStatsAction.StatsRequest statsRequest = new FollowStatsAction.StatsRequest(); + FollowStatsAction.StatsResponses response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().admin().indices().close(new CloseIndexRequest("follower1")).actionGet()); + + statsRequest = new FollowStatsAction.StatsRequest(); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + } + } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java index d58a2d0a0f18d..f03eeaaa03648 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java @@ -6,25 +6,34 @@ package org.elasticsearch.xpack.ccr; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.xpack.CcrIntegTestCase; +import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; +import org.elasticsearch.xpack.core.ccr.client.CcrClient; +import org.hamcrest.Matchers; import java.util.Locale; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -32,6 +41,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; public class FollowerFailOverIT extends CcrIntegTestCase { @@ -220,4 +230,56 @@ public void testAddNewReplicasOnFollower() throws Exception { pauseFollow("follower-index"); } + public void testReadRequestsReturnsLatestMappingVersion() throws Exception { + InternalTestCluster leaderCluster = getLeaderCluster(); + Settings nodeAttributes = Settings.builder().put("node.attr.box", "large").build(); + String dataNode = leaderCluster.startDataOnlyNode(nodeAttributes); + assertAcked( + leaderClient().admin().indices().prepareCreate("leader-index") + .setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true") + .put("index.routing.allocation.require.box", "large")) + .get() + ); + ClusterService clusterService = leaderCluster.clusterService(dataNode); + ShardId shardId = clusterService.state().routingTable().index("leader-index").shard(0).shardId(); + IndicesService indicesService = leaderCluster.getInstance(IndicesService.class, dataNode); + IndexShard indexShard = indicesService.getShardOrNull(shardId); + // Block the ClusterService from exposing the cluster state with the mapping change. This makes the ClusterService + // have an older mapping version than the actual mapping version that IndexService will use to index "doc1". + final CountDownLatch latch = new CountDownLatch(1); + clusterService.addLowPriorityApplier(event -> { + IndexMetaData imd = event.state().metaData().index("leader-index"); + if (imd != null && imd.mapping() != null && + XContentMapValues.extractValue("properties.balance.type", imd.mapping().sourceAsMap()) != null) { + try { + logger.info("--> block ClusterService from exposing new mapping version"); + latch.await(); + } catch (Exception e) { + throw new AssertionError(e); + } + } + }); + leaderCluster.client().admin().indices().preparePutMapping().setType("doc") + .setSource("balance", "type=long").setTimeout(TimeValue.ZERO).get(); + IndexResponse indexResp = leaderCluster.client(dataNode).prepareIndex("leader-index", "doc", "1") + .setSource("{\"balance\": 100}", XContentType.JSON).setTimeout(TimeValue.ZERO).get(); + assertThat(indexResp.getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertThat(indexShard.getGlobalCheckpoint(), equalTo(0L)); + getFollowerCluster().startDataOnlyNode(nodeAttributes); + followerClient().execute(PutFollowAction.INSTANCE, putFollow("leader-index", "follower-index")).get(); + ensureFollowerGreen("follower-index"); + // Make sure at least one read-request which requires mapping sync is completed. + assertBusy(() -> { + CcrClient ccrClient = new CcrClient(followerClient()); + FollowStatsAction.StatsResponses responses = ccrClient.followStats(new FollowStatsAction.StatsRequest()).actionGet(); + long bytesRead = responses.getStatsResponses().stream().mapToLong(r -> r.status().bytesRead()).sum(); + assertThat(bytesRead, Matchers.greaterThan(0L)); + }, 60, TimeUnit.SECONDS); + latch.countDown(); + assertIndexFullyReplicatedToFollower("leader-index", "follower-index"); + pauseFollow("follower-index"); + } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 2ac67a65fc1c6..4d4603d022f7d 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -1016,7 +1016,7 @@ private static Supplier localClusterStateSupplier(ClusterState... private ClusterService mockClusterService() { ClusterService clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = - new ClusterSettings(Settings.EMPTY, Collections.singleton(CcrSettings.CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT)); + new ClusterSettings(Settings.EMPTY, Collections.singleton(CcrSettings.CCR_WAIT_FOR_METADATA_TIMEOUT)); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); return clusterService; } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java index dde869f80bef9..4af9e7c23a276 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -111,7 +111,7 @@ private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testR private final Map fromToSlot = new HashMap<>(); @Override - protected void innerUpdateMapping(LongConsumer handler, Consumer errorHandler) { + protected void innerUpdateMapping(long minRequiredMappingVersion, LongConsumer handler, Consumer errorHandler) { handler.accept(mappingVersion); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index cf7f901aaba33..a7d07b6066732 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -40,7 +40,9 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; public class ShardFollowNodeTaskTests extends ESTestCase { @@ -287,6 +289,35 @@ public void testReceiveRetryableError() { assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } + public void testFatalExceptionNotSetWhenStoppingWhileFetchingOps() { + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); + startTask(task, 63, -1); + + readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); + + mappingVersions.add(1L); + leaderGlobalCheckpoints.add(63L); + maxSeqNos.add(63L); + responseSizes.add(64); + simulateResponse.set(true); + beforeSendShardChangesRequest = status -> { + // Cancel just before attempting to fetch operations: + task.onCancelled(); + }; + task.coordinateReads(); + + assertThat(task.isStopped(), is(true)); + ShardFollowNodeTaskStatus status = task.getStatus(); + assertThat(status.getFatalException(), nullValue()); + assertThat(status.failedReadRequests(), equalTo(1L)); + assertThat(status.successfulReadRequests(), equalTo(0L)); + assertThat(status.readExceptions().size(), equalTo(1)); + } + public void testEmptyShardChangesResponseShouldClearFetchException() { ShardFollowTaskParams params = new ShardFollowTaskParams(); params.maxReadRequestOperationCount = 64; @@ -957,7 +988,7 @@ private ShardFollowNodeTask createShardFollowTask(ShardFollowTaskParams params) 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), followTask, scheduler, System::nanoTime) { @Override - protected void innerUpdateMapping(LongConsumer handler, Consumer errorHandler) { + protected void innerUpdateMapping(long minRequiredMappingVersion, LongConsumer handler, Consumer errorHandler) { Exception failure = mappingUpdateFailures.poll(); if (failure != null) { errorHandler.accept(failure); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 9dc7c6648eebc..8a3e374a24c5a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -396,7 +396,7 @@ protected synchronized void onOperationsFetched(Translog.Operation[] operations) } @Override - protected void innerUpdateMapping(LongConsumer handler, Consumer errorHandler) { + protected void innerUpdateMapping(long minRequiredMappingVersion, LongConsumer handler, Consumer errorHandler) { // noop, as mapping updates are not tested handler.accept(1L); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java new file mode 100644 index 0000000000000..bc8c58f1de7de --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class TransportFollowStatsActionTests extends ESTestCase { + + public void testFindFollowerIndicesFromShardFollowTasks() { + PersistentTasksCustomMetaData.Builder persistentTasks = PersistentTasksCustomMetaData.builder() + .addTask("1", ShardFollowTask.NAME, createShardFollowTask("abc"), null) + .addTask("2", ShardFollowTask.NAME, createShardFollowTask("def"), null); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_cluster")) + .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, persistentTasks.build()).build()) + .build(); + Set result = TransportFollowStatsAction.findFollowerIndicesFromShardFollowTasks(clusterState, null); + assertThat(result.size(), equalTo(2)); + assertThat(result.contains("abc"), is(true)); + assertThat(result.contains("def"), is(true)); + + result = TransportFollowStatsAction.findFollowerIndicesFromShardFollowTasks(clusterState, new String[]{"def"}); + assertThat(result.size(), equalTo(1)); + assertThat(result.contains("def"), is(true)); + + result = TransportFollowStatsAction.findFollowerIndicesFromShardFollowTasks(clusterState, new String[]{"ghi"}); + assertThat(result.size(), equalTo(0)); + } + + private static ShardFollowTask createShardFollowTask(String followerIndex) { + return new ShardFollowTask( + null, + new ShardId(followerIndex, "", 0), + new ShardId("leader_index", "", 0), + 1024, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + 1, + 1024, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + 1, + 10240, + new ByteSizeValue(512, ByteSizeUnit.MB), + TimeValue.timeValueMillis(10), + TimeValue.timeValueMillis(10), + Collections.emptyMap() + ); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java index 2769b76b65f10..9cec2b4cc7c1f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java @@ -5,39 +5,37 @@ */ package org.elasticsearch.license; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.time.DateFormatter; -import org.joda.time.MutableDateTime; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; +import org.elasticsearch.common.time.DateFormatters; import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; public class DateUtils { private static final DateFormatter dateOnlyFormatter = DateFormatter.forPattern("yyyy-MM-dd").withZone(ZoneOffset.UTC); - - private static final DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTime().withZoneUTC(); + private static final DateFormatter dateTimeFormatter = DateFormatter.forPattern("strict_date_time").withZone(ZoneOffset.UTC); public static long endOfTheDay(String date) { try { // Try parsing using complete date/time format - return dateTimeFormatter.parseDateTime(date).getMillis(); - } catch (IllegalArgumentException ex) { - // Fall back to the date only format - MutableDateTime dateTime = new MutableDateTime(dateOnlyFormatter.parseMillis(date)); - dateTime.millisOfDay().set(dateTime.millisOfDay().getMaximumValue()); - return dateTime.getMillis(); + return dateTimeFormatter.parseMillis(date); + } catch (ElasticsearchParseException | IllegalArgumentException ex) { + ZonedDateTime dateTime = DateFormatters.toZonedDateTime(dateOnlyFormatter.parse(date)); + dateTime.with(ChronoField.MILLI_OF_DAY, ChronoField.MILLI_OF_DAY.range().getMaximum()); + return dateTime.toInstant().toEpochMilli(); } } public static long beginningOfTheDay(String date) { try { // Try parsing using complete date/time format - return dateTimeFormatter.parseDateTime(date).getMillis(); - } catch (IllegalArgumentException ex) { + return dateTimeFormatter.parseMillis(date); + } catch (ElasticsearchParseException | IllegalArgumentException ex) { // Fall back to the date only format - return dateOnlyFormatter.parseMillis(date); + return DateFormatters.toZonedDateTime(dateOnlyFormatter.parse(date)).toInstant().toEpochMilli(); } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java index 6b651444f2d7d..2b59ae21daf5a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java @@ -273,7 +273,7 @@ public AutoFollowPattern(String remoteCluster, public AutoFollowPattern(StreamInput in) throws IOException { remoteCluster = in.readString(); - leaderIndexPatterns = in.readList(StreamInput::readString); + leaderIndexPatterns = in.readStringList(); followIndexPattern = in.readOptionalString(); maxReadRequestOperationCount = in.readOptionalVInt(); maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); @@ -350,7 +350,7 @@ public TimeValue getPollTimeout() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); - out.writeStringList(leaderIndexPatterns); + out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexPattern); out.writeOptionalVInt(maxReadRequestOperationCount); out.writeOptionalWriteable(maxReadRequestSize); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index d0969850705f7..12d30e4d9f9b1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -275,7 +275,7 @@ public Request(StreamInput in) throws IOException { super(in); name = in.readString(); remoteCluster = in.readString(); - leaderIndexPatterns = in.readList(StreamInput::readString); + leaderIndexPatterns = in.readStringList(); followIndexNamePattern = in.readOptionalString(); maxReadRequestOperationCount = in.readOptionalVInt(); maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); @@ -294,7 +294,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(name); out.writeString(remoteCluster); - out.writeStringList(leaderIndexPatterns); + out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexNamePattern); out.writeOptionalVInt(maxReadRequestOperationCount); out.writeOptionalWriteable(maxReadRequestSize); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractUnfollowIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractUnfollowIndexStep.java index 8e0626425b490..570c4fdf26771 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractUnfollowIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractUnfollowIndexStep.java @@ -7,6 +7,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetaData; import java.util.Map; @@ -20,7 +21,8 @@ abstract class AbstractUnfollowIndexStep extends AsyncActionStep { } @Override - public final void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, Listener listener) { + public final void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, + ClusterStateObserver observer, Listener listener) { String followerIndex = indexMetaData.getIndex().getName(); Map customIndexMetadata = indexMetaData.getCustomData(CCR_METADATA_KEY); if (customIndexMetadata == null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncActionStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncActionStep.java index 700c817164503..3195d2d525750 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncActionStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncActionStep.java @@ -7,6 +7,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetaData; /** @@ -29,7 +30,8 @@ public boolean indexSurvives() { return true; } - public abstract void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, Listener listener); + public abstract void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, + ClusterStateObserver observer, Listener listener); public interface Listener { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncRetryDuringSnapshotActionStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncRetryDuringSnapshotActionStep.java new file mode 100644 index 0000000000000..85c91a795f09d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncRetryDuringSnapshotActionStep.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.SnapshotInProgressException; + +import java.util.function.Consumer; + +/** + * This is an abstract AsyncActionStep that wraps the performed action listener, checking to see + * if the action fails due to a snapshot being in progress. If a snapshot is in progress, it + * registers an observer and waits to try again when a snapshot is no longer running. + */ +public abstract class AsyncRetryDuringSnapshotActionStep extends AsyncActionStep { + private final Logger logger = LogManager.getLogger(AsyncRetryDuringSnapshotActionStep.class); + + public AsyncRetryDuringSnapshotActionStep(StepKey key, StepKey nextStepKey, Client client) { + super(key, nextStepKey, client); + } + + @Override + public void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, + ClusterStateObserver observer, Listener listener) { + // Wrap the original listener to handle exceptions caused by ongoing snapshots + SnapshotExceptionListener snapshotExceptionListener = new SnapshotExceptionListener(indexMetaData.getIndex(), listener, observer); + performDuringNoSnapshot(indexMetaData, currentClusterState, snapshotExceptionListener); + } + + /** + * Method to be performed during which no snapshots for the index are already underway. + */ + abstract void performDuringNoSnapshot(IndexMetaData indexMetaData, ClusterState currentClusterState, Listener listener); + + /** + * SnapshotExceptionListener is an injected listener wrapper that checks to see if a particular + * action failed due to a {@code SnapshotInProgressException}. If it did, then it registers a + * ClusterStateObserver listener waiting for the next time the snapshot is not running, + * re-running the step's {@link #performAction(IndexMetaData, ClusterState, ClusterStateObserver, Listener)} + * method when the snapshot is no longer running. + */ + class SnapshotExceptionListener implements AsyncActionStep.Listener { + private final Index index; + private final Listener originalListener; + private final ClusterStateObserver observer; + + SnapshotExceptionListener(Index index, Listener originalListener, ClusterStateObserver observer) { + this.index = index; + this.originalListener = originalListener; + this.observer = observer; + } + + @Override + public void onResponse(boolean complete) { + originalListener.onResponse(complete); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof SnapshotInProgressException) { + try { + logger.debug("[{}] attempted to run ILM step but a snapshot is in progress, step will retry at a later time", + index.getName()); + observer.waitForNextChange( + new NoSnapshotRunningListener(observer, index.getName(), state -> { + IndexMetaData idxMeta = state.metaData().index(index); + if (idxMeta == null) { + // The index has since been deleted, mission accomplished! + originalListener.onResponse(true); + } + // Re-invoke the performAction method with the new state + performAction(idxMeta, state, observer, originalListener); + }, originalListener::onFailure), + // TODO: what is a good timeout value for no new state received during this time? + TimeValue.timeValueHours(12)); + } catch (Exception secondError) { + // There was a second error trying to set up an observer, + // fail the original listener + secondError.addSuppressed(e); + originalListener.onFailure(secondError); + } + } else { + originalListener.onFailure(e); + } + } + } + + /** + * A {@link ClusterStateObserver.Listener} that invokes the given function with the new state, + * once no snapshots are running. If a snapshot is still running it registers a new listener + * and tries again. Passes any exceptions to the original exception listener if they occur. + */ + class NoSnapshotRunningListener implements ClusterStateObserver.Listener { + + private final Consumer reRun; + private final Consumer exceptionConsumer; + private final ClusterStateObserver observer; + private final String indexName; + + NoSnapshotRunningListener(ClusterStateObserver observer, String indexName, + Consumer reRun, + Consumer exceptionConsumer) { + this.observer = observer; + this.reRun = reRun; + this.exceptionConsumer = exceptionConsumer; + this.indexName = indexName; + } + + @Override + public void onNewClusterState(ClusterState state) { + try { + if (snapshotInProgress(state)) { + observer.waitForNextChange(this); + } else { + logger.debug("[{}] retrying ILM step after snapshot has completed", indexName); + reRun.accept(state); + } + } catch (Exception e) { + exceptionConsumer.accept(e); + } + } + + private boolean snapshotInProgress(ClusterState state) { + SnapshotsInProgress snapshotsInProgress = state.custom(SnapshotsInProgress.TYPE); + if (snapshotsInProgress == null || snapshotsInProgress.entries().isEmpty()) { + // No snapshots are running, new state is acceptable to proceed + return false; + } + + for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { + if (snapshot.indices().stream() + .map(IndexId::getName) + .anyMatch(name -> name.equals(indexName))) { + // There is a snapshot running with this index name + return true; + } + } + // There are snapshots, but none for this index, so it's okay to proceed with this state + return false; + } + + @Override + public void onClusterServiceClose() { + // This means the cluster is being shut down, so nothing to do here + } + + @Override + public void onTimeout(TimeValue timeout) { + exceptionConsumer.accept(new IllegalStateException("step timed out while waiting for snapshots to complete")); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStep.java index 3fb6e145236bc..149bb6dfef324 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStep.java @@ -8,8 +8,14 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; -final class CloseFollowerIndexStep extends AbstractUnfollowIndexStep { +import java.util.Map; + +import static org.elasticsearch.xpack.core.indexlifecycle.UnfollowAction.CCR_METADATA_KEY; + +final class CloseFollowerIndexStep extends AsyncRetryDuringSnapshotActionStep { static final String NAME = "close-follower-index"; @@ -18,7 +24,14 @@ final class CloseFollowerIndexStep extends AbstractUnfollowIndexStep { } @Override - void innerPerformAction(String followerIndex, Listener listener) { + void performDuringNoSnapshot(IndexMetaData indexMetaData, ClusterState currentClusterState, Listener listener) { + String followerIndex = indexMetaData.getIndex().getName(); + Map customIndexMetadata = indexMetaData.getCustomData(CCR_METADATA_KEY); + if (customIndexMetadata == null) { + listener.onResponse(true); + return; + } + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(followerIndex); getClient().admin().indices().close(closeIndexRequest, ActionListener.wrap( r -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStep.java index f34da641a0cb4..62a5b7bea4b5e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStep.java @@ -14,7 +14,7 @@ /** * Deletes a single index. */ -public class DeleteStep extends AsyncActionStep { +public class DeleteStep extends AsyncRetryDuringSnapshotActionStep { public static final String NAME = "delete"; public DeleteStep(StepKey key, StepKey nextStepKey, Client client) { @@ -22,10 +22,10 @@ public DeleteStep(StepKey key, StepKey nextStepKey, Client client) { } @Override - public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + public void performDuringNoSnapshot(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { getClient().admin().indices() .delete(new DeleteIndexRequest(indexMetaData.getIndex().getName()), - ActionListener.wrap(response -> listener.onResponse(true) , listener::onFailure)); + ActionListener.wrap(response -> listener.onResponse(true), listener::onFailure)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStep.java index 799959ce69f61..871a95419a410 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStep.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetaData; import java.util.Objects; @@ -30,7 +31,7 @@ public int getMaxNumSegments() { } @Override - public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, ClusterStateObserver observer, Listener listener) { ForceMergeRequest request = new ForceMergeRequest(indexMetaData.getIndex().getName()); request.maxNumSegments(maxNumSegments); getClient().admin().indices() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStep.java index 523aad10a488a..ae7b0af6222dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStep.java @@ -14,7 +14,7 @@ /** * Freezes an index. */ -public class FreezeStep extends AsyncActionStep { +public class FreezeStep extends AsyncRetryDuringSnapshotActionStep { public static final String NAME = "freeze"; public FreezeStep(StepKey key, StepKey nextStepKey, Client client) { @@ -22,7 +22,7 @@ public FreezeStep(StepKey key, StepKey nextStepKey, Client client) { } @Override - public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + public void performDuringNoSnapshot(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { getClient().admin().indices().execute(TransportFreezeIndexAction.FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(indexMetaData.getIndex().getName()), ActionListener.wrap(response -> listener.onResponse(true), listener::onFailure)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyUtils.java new file mode 100644 index 0000000000000..c3752e3927b41 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyUtils.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.NotXContentException; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; + +/** + * A utility class used for loading index lifecycle policies from the resource classpath + */ +public class LifecyclePolicyUtils { + + private LifecyclePolicyUtils() {}; + + /** + * Loads a built-in index lifecycle policy and returns its source. + */ + public static LifecyclePolicy loadPolicy(String name, String resource, NamedXContentRegistry xContentRegistry) { + try { + BytesReference source = load(resource); + validate(source); + + try (XContentParser parser = XContentType.JSON.xContent() + .createParser(xContentRegistry, LoggingDeprecationHandler.THROW_UNSUPPORTED_OPERATION, source.utf8ToString())) { + return LifecyclePolicy.parse(parser, name); + } + } catch (Exception e) { + throw new IllegalArgumentException("unable to load policy [" + name + "] from [" + resource + "]", e); + } + } + + /** + * Loads a resource from the classpath and returns it as a {@link BytesReference} + */ + private static BytesReference load(String name) throws IOException { + try (InputStream is = LifecyclePolicyUtils.class.getResourceAsStream(name)) { + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + Streams.copy(is, out); + return new BytesArray(out.toByteArray()); + } + } + } + + /** + * Parses and validates that the source is not empty. + */ + private static void validate(BytesReference source) { + if (source == null) { + throw new ElasticsearchParseException("policy must not be null"); + } + + try { + XContentHelper.convertToMap(source, false, XContentType.JSON).v2(); + } catch (NotXContentException e) { + throw new ElasticsearchParseException("policy must not be empty"); + } catch (Exception e) { + throw new ElasticsearchParseException("invalid policy", e); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/OpenFollowerIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/OpenFollowerIndexStep.java index 7ba2c4633ab99..58ede7200cc4f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/OpenFollowerIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/OpenFollowerIndexStep.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetaData; final class OpenFollowerIndexStep extends AsyncActionStep { @@ -20,7 +21,8 @@ final class OpenFollowerIndexStep extends AsyncActionStep { } @Override - public void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, Listener listener) { + public void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, + ClusterStateObserver observer, Listener listener) { OpenIndexRequest request = new OpenIndexRequest(indexMetaData.getIndex().getName()); getClient().admin().indices().open(request, ActionListener.wrap( r -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java index f501c27d8c40e..91b2bbe82648e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; @@ -30,7 +31,8 @@ public RolloverStep(StepKey key, StepKey nextStepKey, Client client) { } @Override - public void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, Listener listener) { + public void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, + ClusterStateObserver observer, Listener listener) { boolean indexingComplete = LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING.get(indexMetaData.getSettings()); if (indexingComplete) { logger.trace(indexMetaData.getIndex() + " has lifecycle complete set, skipping " + RolloverStep.NAME); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStep.java index 973dd7f0ba08d..7973289b968d5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStep.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; @@ -42,7 +43,7 @@ public SetSingleNodeAllocateStep(StepKey key, StepKey nextStepKey, Client client } @Override - public void performAction(IndexMetaData indexMetaData, ClusterState clusterState, Listener listener) { + public void performAction(IndexMetaData indexMetaData, ClusterState clusterState, ClusterStateObserver observer, Listener listener) { RoutingAllocation allocation = new RoutingAllocation(ALLOCATION_DECIDERS, clusterState.getRoutingNodes(), clusterState, null, System.nanoTime()); List validNodeIds = new ArrayList<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStep.java index da59b16ded99b..51497dccc2d4c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStep.java @@ -18,7 +18,7 @@ * Following shrinking an index and deleting the original index, this step creates an alias with the same name as the original index which * points to the new shrunken index to allow clients to continue to use the original index name without being aware that it has shrunk. */ -public class ShrinkSetAliasStep extends AsyncActionStep { +public class ShrinkSetAliasStep extends AsyncRetryDuringSnapshotActionStep { public static final String NAME = "aliases"; private String shrunkIndexPrefix; @@ -32,7 +32,7 @@ String getShrunkIndexPrefix() { } @Override - public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + public void performDuringNoSnapshot(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { // get source index String index = indexMetaData.getIndex().getName(); // get target shrink index diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStep.java index 19ddbfe04cfa2..0e9f84bc3a843 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStep.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; @@ -38,7 +39,7 @@ String getShrunkIndexPrefix() { } @Override - public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, ClusterStateObserver observer, Listener listener) { LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); if (lifecycleState.getLifecycleDate() == null) { throw new IllegalStateException("source index [" + indexMetaData.getIndex().getName() + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStep.java index c66c1427d07d4..9f6b3f7a84f2a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStep.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; @@ -28,7 +29,7 @@ public UpdateSettingsStep(StepKey key, StepKey nextStepKey, Client client, Setti } @Override - public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, ClusterStateObserver observer, Listener listener) { UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indexMetaData.getIndex().getName()).settings(settings); getClient().admin().indices().updateSettings(updateSettingsRequest, ActionListener.wrap(response -> listener.onResponse(true), listener::onFailure)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java index 239e748e58d8a..0e530baa57f0c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java @@ -82,13 +82,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - failedIndexes = in.readList(StreamInput::readString); + failedIndexes = in.readStringList(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeStringList(failedIndexes); + out.writeStringCollection(failedIndexes); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java index 78fcc4939ca36..f4eb7fbc4e53b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java @@ -327,7 +327,7 @@ public void readFrom(StreamInput in) throws IOException { timeout = in.readOptionalTimeValue(); charset = in.readOptionalString(); format = in.readBoolean() ? in.readEnum(FileStructure.Format.class) : null; - columnNames = in.readBoolean() ? in.readList(StreamInput::readString) : null; + columnNames = in.readBoolean() ? in.readStringList() : null; hasHeaderRow = in.readOptionalBoolean(); delimiter = in.readBoolean() ? (char) in.readVInt() : null; quote = in.readBoolean() ? (char) in.readVInt() : null; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index c5dba63fcc6b3..dd3c4618f7025 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -84,7 +84,7 @@ public Request() {} public Request(StreamInput in) throws IOException { super(in); jobId = in.readString(); - expandedJobsIds = in.readList(StreamInput::readString); + expandedJobsIds = in.readStringList(); if (in.getVersion().onOrAfter(Version.V_6_1_0)) { allowNoJobs = in.readBoolean(); } @@ -94,7 +94,7 @@ public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); - out.writeStringList(expandedJobsIds); + out.writeStringCollection(expandedJobsIds); if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeBoolean(allowNoJobs); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java index ef284e1394205..15531fce7a1ca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java @@ -91,7 +91,7 @@ static long parseDateOrThrow(String date, ParseField paramName, LongSupplier now DateMathParser dateMathParser = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); try { - return dateMathParser.parse(date, now); + return dateMathParser.parse(date, now).toEpochMilli(); } catch (Exception e) { String msg = Messages.getMessage(Messages.REST_INVALID_DATETIME_PARAMS, paramName.getPreferredName(), date); throw new ElasticsearchParseException(msg, e); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java index d33280dcac3d2..cdfdf06bf85ac 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java @@ -162,7 +162,7 @@ static long parseDateOrThrow(String date, ParseField paramName, LongSupplier now DateMathParser dateMathParser = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); try { - return dateMathParser.parse(date, now); + return dateMathParser.parse(date, now).toEpochMilli(); } catch (Exception e) { String msg = Messages.getMessage(Messages.REST_INVALID_DATETIME_PARAMS, paramName.getPreferredName(), date); throw new ElasticsearchParseException(msg, e); @@ -197,7 +197,7 @@ public DatafeedParams(StreamInput in) throws IOException { timeout = TimeValue.timeValueMillis(in.readVLong()); if (in.getVersion().onOrAfter(Version.V_6_6_0)) { jobId = in.readOptionalString(); - datafeedIndices = in.readList(StreamInput::readString); + datafeedIndices = in.readStringList(); } } @@ -274,7 +274,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(timeout.millis()); if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeOptionalString(jobId); - out.writeStringList(datafeedIndices); + out.writeStringCollection(datafeedIndices); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 3002a43e9d8c2..938452d27cc58 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -246,14 +246,14 @@ public DatafeedConfig(StreamInput in) throws IOException { this.queryDelay = in.readOptionalTimeValue(); this.frequency = in.readOptionalTimeValue(); if (in.readBoolean()) { - this.indices = Collections.unmodifiableList(in.readList(StreamInput::readString)); + this.indices = Collections.unmodifiableList(in.readStringList()); } else { this.indices = null; } // This consumes the list of types if there was one. if (in.getVersion().before(Version.V_7_0_0)) { if (in.readBoolean()) { - in.readList(StreamInput::readString); + in.readStringList(); } } if (in.getVersion().before(Version.V_6_6_0)) { @@ -408,7 +408,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalTimeValue(frequency); if (indices != null) { out.writeBoolean(true); - out.writeStringList(indices); + out.writeStringCollection(indices); } else { out.writeBoolean(false); } @@ -416,7 +416,7 @@ public void writeTo(StreamOutput out) throws IOException { // An empty list is expected if (out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(true); - out.writeStringList(Collections.emptyList()); + out.writeStringCollection(Collections.emptyList()); } if (out.getVersion().before(Version.V_6_6_0)) { out.writeNamedWriteable(getParsedQuery()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index 8bc49d4598a21..23c2eeccc6a59 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -107,14 +107,14 @@ public DatafeedUpdate(StreamInput in) throws IOException { this.queryDelay = in.readOptionalTimeValue(); this.frequency = in.readOptionalTimeValue(); if (in.readBoolean()) { - this.indices = in.readList(StreamInput::readString); + this.indices = in.readStringList(); } else { this.indices = null; } // This consumes the list of types if there was one. if (in.getVersion().before(Version.V_7_0_0)) { if (in.readBoolean()) { - in.readList(StreamInput::readString); + in.readStringList(); } } this.query = in.readOptionalNamedWriteable(QueryBuilder.class); @@ -148,7 +148,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalTimeValue(frequency); if (indices != null) { out.writeBoolean(true); - out.writeStringList(indices); + out.writeStringCollection(indices); } else { out.writeBoolean(false); } @@ -156,7 +156,7 @@ public void writeTo(StreamOutput out) throws IOException { // An empty list is expected if (out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(true); - out.writeStringList(Collections.emptyList()); + out.writeStringCollection(Collections.emptyList()); } out.writeOptionalNamedWriteable(query); out.writeOptionalWriteable(aggregations); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java index f0ba07ad15c68..bb1faeddd8298 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -22,9 +22,9 @@ import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneOffset; import java.util.Arrays; import java.util.Collection; import java.util.concurrent.TimeUnit; @@ -128,7 +128,7 @@ public static long getHistogramIntervalMillis(AggregationBuilder histogramAggreg * an {@link ElasticsearchException} with the validation error */ private static long validateAndGetDateHistogramInterval(DateHistogramAggregationBuilder dateHistogram) { - if (dateHistogram.timeZone() != null && dateHistogram.timeZone().equals(DateTimeZone.UTC) == false) { + if (dateHistogram.timeZone() != null && dateHistogram.timeZone().normalized().equals(ZoneOffset.UTC) == false) { throw ExceptionsHelper.badRequestException("ML requires date_histogram.time_zone to be UTC"); } @@ -141,7 +141,7 @@ private static long validateAndGetDateHistogramInterval(DateHistogramAggregation public static long validateAndGetCalendarInterval(String calendarInterval) { TimeValue interval; - DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(calendarInterval); + Rounding.DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(calendarInterval); if (dateTimeUnit != null) { switch (dateTimeUnit) { case WEEK_OF_WEEKYEAR: @@ -161,7 +161,7 @@ public static long validateAndGetCalendarInterval(String calendarInterval) { break; case MONTH_OF_YEAR: case YEAR_OF_CENTURY: - case QUARTER: + case QUARTER_OF_YEAR: throw ExceptionsHelper.badRequestException(invalidDateHistogramCalendarIntervalMessage(calendarInterval)); default: throw ExceptionsHelper.badRequestException("Unexpected dateTimeUnit [" + dateTimeUnit + "]"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java index 12c42f3df4d58..acaceace0472b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java @@ -206,20 +206,20 @@ public FileStructure(StreamInput in) throws IOException { format = in.readEnum(Format.class); multilineStartPattern = in.readOptionalString(); excludeLinesPattern = in.readOptionalString(); - columnNames = in.readBoolean() ? Collections.unmodifiableList(in.readList(StreamInput::readString)) : null; + columnNames = in.readBoolean() ? Collections.unmodifiableList(in.readStringList()) : null; hasHeaderRow = in.readOptionalBoolean(); delimiter = in.readBoolean() ? (char) in.readVInt() : null; quote = in.readBoolean() ? (char) in.readVInt() : null; shouldTrimFields = in.readOptionalBoolean(); grokPattern = in.readOptionalString(); - jodaTimestampFormats = in.readBoolean() ? Collections.unmodifiableList(in.readList(StreamInput::readString)) : null; - javaTimestampFormats = in.readBoolean() ? Collections.unmodifiableList(in.readList(StreamInput::readString)) : null; + jodaTimestampFormats = in.readBoolean() ? Collections.unmodifiableList(in.readStringList()) : null; + javaTimestampFormats = in.readBoolean() ? Collections.unmodifiableList(in.readStringList()) : null; timestampField = in.readOptionalString(); needClientTimezone = in.readBoolean(); mappings = Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap())); ingestPipeline = in.readBoolean() ? Collections.unmodifiableMap(in.readMap()) : null; fieldStats = Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap(StreamInput::readString, FieldStats::new))); - explanation = Collections.unmodifiableList(in.readList(StreamInput::readString)); + explanation = Collections.unmodifiableList(in.readStringList()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index 7d462bd153371..933188c8221eb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -125,7 +125,7 @@ private AnalysisConfig(TimeValue bucketSpan, String categorizationFieldName, Lis public AnalysisConfig(StreamInput in) throws IOException { bucketSpan = in.readTimeValue(); categorizationFieldName = in.readOptionalString(); - categorizationFilters = in.readBoolean() ? Collections.unmodifiableList(in.readList(StreamInput::readString)) : null; + categorizationFilters = in.readBoolean() ? Collections.unmodifiableList(in.readStringList()) : null; if (in.getVersion().onOrAfter(Version.V_6_2_0)) { categorizationAnalyzerConfig = in.readOptionalWriteable(CategorizationAnalyzerConfig::new); } else { @@ -134,7 +134,7 @@ public AnalysisConfig(StreamInput in) throws IOException { latency = in.readOptionalTimeValue(); summaryCountFieldName = in.readOptionalString(); detectors = Collections.unmodifiableList(in.readList(Detector::new)); - influencers = Collections.unmodifiableList(in.readList(StreamInput::readString)); + influencers = Collections.unmodifiableList(in.readStringList()); multivariateByFields = in.readOptionalBoolean(); } @@ -145,7 +145,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(categorizationFieldName); if (categorizationFilters != null) { out.writeBoolean(true); - out.writeStringList(categorizationFilters); + out.writeStringCollection(categorizationFilters); } else { out.writeBoolean(false); } @@ -155,7 +155,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalTimeValue(latency); out.writeOptionalString(summaryCountFieldName); out.writeList(detectors); - out.writeStringList(influencers); + out.writeStringCollection(influencers); out.writeOptionalBoolean(multivariateByFields); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index 314a533914a6a..a912b5d65f208 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -187,7 +187,7 @@ public Job(StreamInput in) throws IOException { jobType = in.readString(); jobVersion = in.readBoolean() ? Version.readVersion(in) : null; if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - groups = Collections.unmodifiableList(in.readList(StreamInput::readString)); + groups = Collections.unmodifiableList(in.readStringList()); } else { groups = Collections.emptyList(); } @@ -444,7 +444,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeStringList(groups); + out.writeStringCollection(groups); } out.writeOptionalString(description); out.writeVLong(createTime.getTime()); @@ -671,7 +671,7 @@ public Builder(StreamInput in) throws IOException { jobType = in.readString(); jobVersion = in.readBoolean() ? Version.readVersion(in) : null; if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - groups = in.readList(StreamInput::readString); + groups = in.readStringList(); } else { groups = Collections.emptyList(); } @@ -856,7 +856,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeStringList(groups); + out.writeStringCollection(groups); } out.writeOptionalString(description); if (createTime != null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index a0519697e5909..36e1fc1096675 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -125,7 +125,7 @@ public JobUpdate(StreamInput in) throws IOException { modelSnapshotRetentionDays = in.readOptionalLong(); resultsRetentionDays = in.readOptionalLong(); if (in.readBoolean()) { - categorizationFilters = in.readList(StreamInput::readString); + categorizationFilters = in.readStringList(); } else { categorizationFilters = null; } @@ -172,7 +172,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalLong(resultsRetentionDays); out.writeBoolean(categorizationFilters != null); if (categorizationFilters != null) { - out.writeStringList(categorizationFilters); + out.writeStringCollection(categorizationFilters); } out.writeMap(customSettings); out.writeOptionalString(modelSnapshotId); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index fb0db771fa581..0eb2e666916dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -5,8 +5,24 @@ */ package org.elasticsearch.xpack.core.ml.job.persistence; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig; @@ -38,10 +54,16 @@ import org.elasticsearch.xpack.core.ml.notifications.AuditMessage; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.List; +import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; /** * Static methods to create Elasticsearch index mappings for the autodetect @@ -107,6 +129,8 @@ public class ElasticsearchMappings { static final String RAW = "raw"; + private static final Logger logger = LogManager.getLogger(ElasticsearchMappings.class); + private ElasticsearchMappings() { } @@ -964,4 +988,94 @@ public static XContentBuilder auditMessageMapping() throws IOException { .endObject() .endObject(); } + + static String[] mappingRequiresUpdate(ClusterState state, String[] concreteIndices, Version minVersion) throws IOException { + List indicesToUpdate = new ArrayList<>(); + + ImmutableOpenMap> currentMapping = state.metaData().findMappings(concreteIndices, + new String[] {DOC_TYPE}, MapperPlugin.NOOP_FIELD_FILTER); + + for (String index : concreteIndices) { + ImmutableOpenMap innerMap = currentMapping.get(index); + if (innerMap != null) { + MappingMetaData metaData = innerMap.get(DOC_TYPE); + try { + @SuppressWarnings("unchecked") + Map meta = (Map) metaData.sourceAsMap().get("_meta"); + if (meta != null) { + String versionString = (String) meta.get("version"); + if (versionString == null) { + logger.info("Version of mappings for [{}] not found, recreating", index); + indicesToUpdate.add(index); + continue; + } + + Version mappingVersion = Version.fromString(versionString); + + if (mappingVersion.onOrAfter(minVersion)) { + continue; + } else { + logger.info("Mappings for [{}] are outdated [{}], updating it[{}].", index, mappingVersion, Version.CURRENT); + indicesToUpdate.add(index); + continue; + } + } else { + logger.info("Version of mappings for [{}] not found, recreating", index); + indicesToUpdate.add(index); + continue; + } + } catch (Exception e) { + logger.error(new ParameterizedMessage("Failed to retrieve mapping version for [{}], recreating", index), e); + indicesToUpdate.add(index); + continue; + } + } else { + logger.info("No mappings found for [{}], recreating", index); + indicesToUpdate.add(index); + } + } + return indicesToUpdate.toArray(new String[indicesToUpdate.size()]); + } + + public static void addDocMappingIfMissing(String alias, CheckedSupplier mappingSupplier, + Client client, ClusterState state, ActionListener listener) { + AliasOrIndex aliasOrIndex = state.metaData().getAliasAndIndexLookup().get(alias); + if (aliasOrIndex == null) { + // The index has never been created yet + listener.onResponse(true); + return; + } + String[] concreteIndices = aliasOrIndex.getIndices().stream().map(IndexMetaData::getIndex).map(Index::getName) + .toArray(String[]::new); + + String[] indicesThatRequireAnUpdate; + try { + indicesThatRequireAnUpdate = mappingRequiresUpdate(state, concreteIndices, Version.CURRENT); + } catch (IOException e) { + listener.onFailure(e); + return; + } + + if (indicesThatRequireAnUpdate.length > 0) { + try (XContentBuilder mapping = mappingSupplier.get()) { + PutMappingRequest putMappingRequest = new PutMappingRequest(indicesThatRequireAnUpdate); + putMappingRequest.type(DOC_TYPE); + putMappingRequest.source(mapping); + executeAsyncWithOrigin(client, ML_ORIGIN, PutMappingAction.INSTANCE, putMappingRequest, + ActionListener.wrap(response -> { + if (response.isAcknowledged()) { + listener.onResponse(true); + } else { + listener.onFailure(new ElasticsearchException("Attempt to put missing mapping in indices " + + Arrays.toString(indicesThatRequireAnUpdate) + " was not acknowledged")); + } + }, listener::onFailure)); + } catch (IOException e) { + listener.onFailure(e); + } + } else { + logger.trace("Mappings are up to date."); + listener.onResponse(true); + } + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java index 793968802c23a..d335ba39e0026 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -139,7 +139,7 @@ public Bucket(StreamInput in) throws IOException { in.readList(Bucket::readOldPerPartitionNormalization); } if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - scheduledEvents = in.readList(StreamInput::readString); + scheduledEvents = in.readStringList(); if (scheduledEvents.isEmpty()) { scheduledEvents = Collections.emptyList(); } @@ -165,7 +165,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeList(Collections.emptyList()); } if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeStringList(scheduledEvents); + out.writeStringCollection(scheduledEvents); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java index 7d5fb0a1bae0c..576bed5dcea2f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java @@ -77,7 +77,7 @@ public CategoryDefinition(StreamInput in) throws IOException { terms = in.readString(); regex = in.readString(); maxMatchingLength = in.readLong(); - examples = new TreeSet<>(in.readList(StreamInput::readString)); + examples = new TreeSet<>(in.readStringList()); if (in.getVersion().onOrAfter(Version.V_6_4_0)) { grokPattern = in.readOptionalString(); } @@ -90,7 +90,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(terms); out.writeString(regex); out.writeLong(maxMatchingLength); - out.writeStringList(new ArrayList<>(examples)); + out.writeStringCollection(examples); if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeOptionalString(grokPattern); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java index a9daa78a6362a..506fbff640f75 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java @@ -124,7 +124,7 @@ public ForecastRequestStats(StreamInput in) throws IOException { forecastId = in.readString(); recordCount = in.readLong(); if (in.readBoolean()) { - messages = in.readList(StreamInput::readString); + messages = in.readStringList(); } else { messages = null; } @@ -147,7 +147,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recordCount); if (messages != null) { out.writeBoolean(true); - out.writeStringList(messages); + out.writeStringCollection(messages); } else { out.writeBoolean(false); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java index 6434b0ff2b98d..ea0994dad717c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.ml.utils.time; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; @@ -15,6 +16,7 @@ import java.util.concurrent.TimeUnit; public final class TimeUtils { + private TimeUtils() { // Do nothing } @@ -55,7 +57,7 @@ public static long dateStringToEpoch(String date) { try { return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(date); - } catch (IllegalArgumentException e) { + } catch (ElasticsearchParseException | IllegalArgumentException e) { } // Could not do the conversion return -1; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java index 166322b93722c..f4fee8acc3d1f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java @@ -9,12 +9,11 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -22,9 +21,9 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Map; import java.util.Objects; @@ -82,7 +81,7 @@ public DateHistogramGroupConfig(final String field, final DateHistogramInterval * The {@code field} and {@code interval} are required to compute the date histogram for the rolled up documents. * The {@code delay} is optional and can be set to {@code null}. It defines how long to wait before rolling up new documents. * The {@code timeZone} is optional and can be set to {@code null}. When configured, the time zone value is resolved using - * ({@link DateTimeZone#forID(String)} and must match a time zone identifier provided by the Joda Time library. + * ({@link ZoneId#of(String)} and must match a time zone identifier. *

* @param field the name of the date field to use for the date histogram (required) * @param interval the interval to use for the date histogram (required) @@ -229,23 +228,14 @@ public static DateHistogramGroupConfig fromXContent(final XContentParser parser) } private static Rounding createRounding(final String expr, final String timeZone) { - DateTimeUnit timeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(expr); + Rounding.DateTimeUnit timeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(expr); final Rounding.Builder rounding; if (timeUnit != null) { rounding = new Rounding.Builder(timeUnit); } else { rounding = new Rounding.Builder(TimeValue.parseTimeValue(expr, "createRounding")); } - rounding.timeZone(toDateTimeZone(timeZone)); + rounding.timeZone(ZoneId.of(timeZone)); return rounding.build(); } - - private static DateTimeZone toDateTimeZone(final String timezone) { - try { - return DateTimeZone.forOffsetHours(Integer.parseInt(timezone)); - } catch (NumberFormatException e) { - return DateTimeZone.forID(timezone); - } - } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java index 46f0c7397c6e5..47b45dc69dd2d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java @@ -86,7 +86,7 @@ public MetricConfig(final String field, final List metrics) { MetricConfig(final StreamInput in) throws IOException { field = in.readString(); - metrics = in.readList(StreamInput::readString); + metrics = in.readStringList(); } /** @@ -144,7 +144,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(field); - out.writeStringList(metrics); + out.writeStringCollection(metrics); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java index 6d4a3f1ad44d0..0e1652af95b2e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java @@ -49,12 +49,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMap(created, StreamOutput::writeString, StreamOutput::writeStringList); + out.writeMap(created, StreamOutput::writeString, StreamOutput::writeStringCollection); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - this.created = Collections.unmodifiableMap(in.readMap(StreamInput::readString, si -> si.readList(StreamInput::readString))); + this.created = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readStringList)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java index 19a84525c3011..087e29ec8b56a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java @@ -125,7 +125,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); this.name = in.readString(); this.enabled = in.readBoolean(); - this.roles = in.readList(StreamInput::readString); + this.roles = in.readStringList(); this.rules = ExpressionParser.readExpression(in); this.metadata = in.readMap(); this.refreshPolicy = RefreshPolicy.readFrom(in); @@ -136,7 +136,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(name); out.writeBoolean(enabled); - out.writeStringList(roles); + out.writeStringCollection(roles); ExpressionParser.writeExpression(rules, out); out.writeMap(metadata); refreshPolicy.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java index 11003b4ce30e0..b6d84d766c328 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java @@ -145,7 +145,7 @@ public Indices(StreamInput in) throws IOException { return new FieldPermissionsDefinition.FieldGrantExcludeGroup(grant, exclude); })); queries = Collections.unmodifiableSet(in.readSet(StreamInput::readBytesReference)); - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { this.allowRestrictedIndices = in.readBoolean(); } else { this.allowRestrictedIndices = false; @@ -254,7 +254,7 @@ public void writeTo(StreamOutput out) throws IOException { output.writeOptionalStringArray(fields.getExcludedFields()); }); out.writeCollection(queries, StreamOutput::writeBytesReference); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeBoolean(allowRestrictedIndices); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java index 117ef3316e1b9..f6e7965d9630b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java @@ -49,8 +49,8 @@ public TokensInvalidationResult(List invalidatedTokens, List pre } public TokensInvalidationResult(StreamInput in) throws IOException { - this.invalidatedTokens = in.readList(StreamInput::readString); - this.previouslyInvalidatedTokens = in.readList(StreamInput::readString); + this.invalidatedTokens = in.readStringList(); + this.previouslyInvalidatedTokens = in.readStringList(); this.errors = in.readList(StreamInput::readException); this.attemptCount = in.readVInt(); } @@ -97,8 +97,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - out.writeStringList(invalidatedTokens); - out.writeStringList(previouslyInvalidatedTokens); + out.writeStringCollection(invalidatedTokens); + out.writeStringCollection(previouslyInvalidatedTokens); out.writeCollection(errors, StreamOutput::writeException); out.writeVInt(attemptCount); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java index 8e3f8e5593df1..95d1e9fa77149 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java @@ -78,7 +78,7 @@ public ExpressionRoleMapping(String name, RoleMapperExpression expr, List initializeReservedRoles() { .put("superuser", SUPERUSER_ROLE_DESCRIPTOR) .put("transport_client", new RoleDescriptor("transport_client", new String[] { "transport_client" }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) - .put("kibana_user", new RoleDescriptor("kibana_user", null, new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*").privileges("manage", "read", "index", "delete") - .build() }, new RoleDescriptor.ApplicationResourcePrivileges[] { + .put("kibana_user", new RoleDescriptor("kibana_user", null, null, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder() .application("kibana-.kibana").resources("*").privileges("all").build() }, null, null, @@ -97,10 +95,7 @@ private static Map initializeReservedRoles() { .put("kibana_dashboard_only_user", new RoleDescriptor( "kibana_dashboard_only_user", null, - new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder() - .indices(".kibana*").privileges("read", "view_index_metadata").build() - }, + null, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder() .application("kibana-.kibana").resources("*").privileges("read").build() }, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java index f81c7955abbc4..e67baeaad3916 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java @@ -91,7 +91,7 @@ public static DateTime parseDateMathOrNull(String fieldName, XContentParser pars } public static DateTime parseDateMath(String valueString, DateTimeZone timeZone, final Clock clock) { - return new DateTime(dateMathParser.parse(valueString, clock::millis), timeZone); + return new DateTime(dateMathParser.parse(valueString, clock::millis).toEpochMilli(), timeZone); } public static DateTime parseDate(String fieldName, XContentParser parser, DateTimeZone timeZone) throws IOException { diff --git a/x-pack/plugin/core/src/main/resources/watch-history-ilm-policy.json b/x-pack/plugin/core/src/main/resources/watch-history-ilm-policy.json new file mode 100644 index 0000000000000..e45e6b25e8f7b --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/watch-history-ilm-policy.json @@ -0,0 +1,10 @@ +{ + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/watch-history.json b/x-pack/plugin/core/src/main/resources/watch-history.json index 9a4a96409b043..db6fd4aff950a 100644 --- a/x-pack/plugin/core/src/main/resources/watch-history.json +++ b/x-pack/plugin/core/src/main/resources/watch-history.json @@ -5,6 +5,7 @@ "index.number_of_shards": 1, "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", + "index.lifecycle.name": "watch-history-ilm-policy", "index.format": 6 }, "mappings": { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java index 6812aca474749..2219a78055544 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java @@ -15,9 +15,9 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneOffset; public class RewriteCachingDirectoryReaderTests extends ESTestCase { @@ -92,15 +92,15 @@ public void testIsWithinQuery() throws IOException { dateFieldType.setName("test"); QueryRewriteContext context = new QueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> 0); MappedFieldType.Relation relation = dateFieldType.isFieldWithinQuery(cachingDirectoryReader, 0, 10, - true, true, DateTimeZone.UTC, null, context); + true, true, ZoneOffset.UTC, null, context); assertEquals(relation, MappedFieldType.Relation.WITHIN); relation = dateFieldType.isFieldWithinQuery(cachingDirectoryReader, 3, 11, - true, true, DateTimeZone.UTC, null, context); + true, true, ZoneOffset.UTC, null, context); assertEquals(relation, MappedFieldType.Relation.INTERSECTS); relation = dateFieldType.isFieldWithinQuery(cachingDirectoryReader, 10, 11, - false, true, DateTimeZone.UTC, null, context); + false, true, ZoneOffset.UTC, null, context); assertEquals(relation, MappedFieldType.Relation.DISJOINT); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java deleted file mode 100644 index 69710b24bb230..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.license; - -import org.elasticsearch.analysis.common.CommonAnalysisPlugin; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.discovery.TestZenDiscovery; -import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; - -import java.util.Arrays; -import java.util.Collection; - -import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; - -@ESIntegTestCase.ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0, transportClientRatio = 0, - autoMinMasterNodes = false) -public class LicenseServiceClusterNotRecoveredTests extends AbstractLicensesIntegrationTestCase { - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return nodeSettingsBuilder(nodeOrdinal).build(); - } - - @Override - protected boolean addMockHttpTransport() { - return false; - } - - private Settings.Builder nodeSettingsBuilder(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("node.data", true) - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // this test is just weird - .put("resource.reload.interval.high", "500ms"); // for license mode file watcher - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class, Netty4Plugin.class); - } - - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - - public void testClusterNotRecovered() throws Exception { - logger.info("--> start one master out of two [recovery state]"); - internalCluster().startNode(nodeSettingsBuilder(0).put("discovery.zen.minimum_master_nodes", 2).put("node.master", true)); - logger.info("--> start second master out of two [recovered state]"); - internalCluster().startNode(nodeSettingsBuilder(1).put("discovery.zen.minimum_master_nodes", 2).put("node.master", true)); - assertLicenseActive(true); - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java index 230fd75fbb958..47dad7e18eb32 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java @@ -29,6 +29,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.List; import java.util.UUID; @@ -51,11 +52,11 @@ public class TestUtils { private static final DateMathParser dateMathParser = formatDateTimeFormatter.toDateMathParser(); public static String dateMathString(String time, final long now) { - return formatDateTimeFormatter.formatMillis(dateMathParser.parse(time, () -> now)); + return formatDateTimeFormatter.format(dateMathParser.parse(time, () -> now).atZone(ZoneOffset.UTC)); } public static long dateMath(String time, final long now) { - return dateMathParser.parse(time, () -> now); + return dateMathParser.parse(time, () -> now).toEpochMilli(); } public static LicenseSpec generateRandomLicenseSpec(int version) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractUnfollowIndexStepTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractUnfollowIndexStepTestCase.java index 5ceb8ca657006..7ec81b5bae262 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractUnfollowIndexStepTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractUnfollowIndexStepTestCase.java @@ -53,7 +53,7 @@ public final void testNotAFollowerIndex() { Boolean[] completed = new Boolean[1]; Exception[] failure = new Exception[1]; - step.performAction(indexMetadata, null, new AsyncActionStep.Listener() { + step.performAction(indexMetadata, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { completed[0] = complete; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java index 528021189e107..25e1c4e481bba 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java @@ -23,12 +23,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; -public class CloseFollowerIndexStepTests extends AbstractUnfollowIndexStepTestCase { - - @Override - protected CloseFollowerIndexStep newInstance(Step.StepKey key, Step.StepKey nextKey, Client client) { - return new CloseFollowerIndexStep(key, nextKey, client); - } +public class CloseFollowerIndexStepTests extends AbstractStepTestCase { public void testCloseFollowingIndex() { IndexMetaData indexMetadata = IndexMetaData.builder("follower-index") @@ -56,7 +51,7 @@ public void testCloseFollowingIndex() { Boolean[] completed = new Boolean[1]; Exception[] failure = new Exception[1]; CloseFollowerIndexStep step = new CloseFollowerIndexStep(randomStepKey(), randomStepKey(), client); - step.performAction(indexMetadata, null, new AsyncActionStep.Listener() { + step.performAction(indexMetadata, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { completed[0] = complete; @@ -98,7 +93,7 @@ public void testCloseFollowingIndexFailed() { Boolean[] completed = new Boolean[1]; Exception[] failure = new Exception[1]; CloseFollowerIndexStep step = new CloseFollowerIndexStep(randomStepKey(), randomStepKey(), client); - step.performAction(indexMetadata, null, new AsyncActionStep.Listener() { + step.performAction(indexMetadata, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { completed[0] = complete; @@ -114,4 +109,30 @@ public void onFailure(Exception e) { Mockito.verify(indicesClient).close(Mockito.any(), Mockito.any()); Mockito.verifyNoMoreInteractions(indicesClient); } + + @Override + protected CloseFollowerIndexStep createRandomInstance() { + Step.StepKey stepKey = randomStepKey(); + Step.StepKey nextStepKey = randomStepKey(); + return new CloseFollowerIndexStep(stepKey, nextStepKey, Mockito.mock(Client.class)); + } + + @Override + protected CloseFollowerIndexStep mutateInstance(CloseFollowerIndexStep instance) { + Step.StepKey key = instance.getKey(); + Step.StepKey nextKey = instance.getNextStepKey(); + + if (randomBoolean()) { + key = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + } else { + nextKey = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + } + + return new CloseFollowerIndexStep(key, nextKey, instance.getClient()); + } + + @Override + protected CloseFollowerIndexStep copyInstance(CloseFollowerIndexStep instance) { + return new CloseFollowerIndexStep(instance.getKey(), instance.getNextStepKey(), instance.getClient()); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStepTests.java index c85df6de659e8..405a39cd5906c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStepTests.java @@ -91,7 +91,7 @@ public void testDeleted() { SetOnce actionCompleted = new SetOnce<>(); DeleteStep step = createRandomInstance(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { actionCompleted.set(complete); @@ -138,7 +138,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { SetOnce exceptionThrown = new SetOnce<>(); DeleteStep step = createRandomInstance(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { throw new AssertionError("Unexpected method call"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStepTests.java index 9a38ddf3a2677..4ce895ae349ea 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStepTests.java @@ -88,7 +88,7 @@ public void testPerformActionComplete() { ForceMergeStep step = new ForceMergeStep(stepKey, nextStepKey, client, maxNumSegments); SetOnce completed = new SetOnce<>(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { completed.set(complete); @@ -129,7 +129,7 @@ public void testPerformActionThrowsException() { ForceMergeStep step = new ForceMergeStep(stepKey, nextStepKey, client, maxNumSegments); SetOnce exceptionThrown = new SetOnce<>(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { throw new AssertionError("unexpected method call"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStepTests.java index 94ca2c2635c63..0198ed7abee7c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStepTests.java @@ -92,7 +92,7 @@ public void testFreeze() { SetOnce actionCompleted = new SetOnce<>(); FreezeStep step = createRandomInstance(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { actionCompleted.set(complete); @@ -135,7 +135,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { SetOnce exceptionThrown = new SetOnce<>(); FreezeStep step = createRandomInstance(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { throw new AssertionError("Unexpected method call"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/OpenFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/OpenFollowerIndexStepTests.java index 2d5086ec88fac..027b5c811cead 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/OpenFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/OpenFollowerIndexStepTests.java @@ -77,7 +77,7 @@ public void testOpenFollowingIndex() { Boolean[] completed = new Boolean[1]; Exception[] failure = new Exception[1]; OpenFollowerIndexStep step = new OpenFollowerIndexStep(randomStepKey(), randomStepKey(), client); - step.performAction(indexMetadata, null, new AsyncActionStep.Listener() { + step.performAction(indexMetadata, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { completed[0] = complete; @@ -118,7 +118,7 @@ public void testOpenFollowingIndexFailed() { Boolean[] completed = new Boolean[1]; Exception[] failure = new Exception[1]; OpenFollowerIndexStep step = new OpenFollowerIndexStep(randomStepKey(), randomStepKey(), client); - step.performAction(indexMetadata, null, new AsyncActionStep.Listener() { + step.performAction(indexMetadata, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { completed[0] = complete; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PauseFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PauseFollowerIndexStepTests.java index fa877ef080ff4..f1977bdbd8577 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PauseFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PauseFollowerIndexStepTests.java @@ -56,7 +56,7 @@ public void testPauseFollowingIndex() { Boolean[] completed = new Boolean[1]; Exception[] failure = new Exception[1]; PauseFollowerIndexStep step = new PauseFollowerIndexStep(randomStepKey(), randomStepKey(), client); - step.performAction(indexMetadata, null, new AsyncActionStep.Listener() { + step.performAction(indexMetadata, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { completed[0] = complete; @@ -93,7 +93,7 @@ public void testPauseFollowingIndexFailed() { Boolean[] completed = new Boolean[1]; Exception[] failure = new Exception[1]; PauseFollowerIndexStep step = new PauseFollowerIndexStep(randomStepKey(), randomStepKey(), client); - step.performAction(indexMetadata, null, new AsyncActionStep.Listener() { + step.performAction(indexMetadata, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { completed[0] = complete; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java index 5f65b581e00b3..007f0c2865db2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java @@ -106,7 +106,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); SetOnce actionCompleted = new SetOnce<>(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { @@ -138,7 +138,7 @@ public void testPerformActionWithIndexingComplete() { RolloverStep step = createRandomInstance(); SetOnce actionCompleted = new SetOnce<>(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { @@ -183,7 +183,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); SetOnce exceptionThrown = new SetOnce<>(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { @@ -212,7 +212,7 @@ public void testPerformActionInvalidNullOrEmptyAlias() { RolloverStep step = createRandomInstance(); SetOnce exceptionThrown = new SetOnce<>(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { throw new AssertionError("Unexpected method call"); @@ -237,7 +237,7 @@ public void testPerformActionAliasDoesNotPointToIndex() { RolloverStep step = createRandomInstance(); SetOnce exceptionThrown = new SetOnce<>(); - step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + step.performAction(indexMetaData, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { throw new AssertionError("Unexpected method call"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityActionTests.java index 34634c3972d56..c0848f5326c40 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityActionTests.java @@ -38,6 +38,7 @@ protected Reader instanceReader() { return SetPriorityAction::new; } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/37652") public void testNonPositivePriority() { Exception e = expectThrows(Exception.class, () -> new SetPriorityAction(randomIntBetween(-100, 0))); assertThat(e.getMessage(), equalTo("[priority] must be 0 or greater")); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java index b42ada6956f87..525744d68af10 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java @@ -262,7 +262,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).updateSettings(Mockito.any(), Mockito.any()); SetOnce exceptionThrown = new SetOnce<>(); - step.performAction(indexMetaData, clusterState, new Listener() { + step.performAction(indexMetaData, clusterState, null, new Listener() { @Override public void onResponse(boolean complete) { @@ -320,7 +320,7 @@ public void testPerformActionAttrsNoShard() { SetSingleNodeAllocateStep step = createRandomInstance(); SetOnce exceptionThrown = new SetOnce<>(); - step.performAction(indexMetaData, clusterState, new Listener() { + step.performAction(indexMetaData, clusterState, null, new Listener() { @Override public void onResponse(boolean complete) { @@ -375,7 +375,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { SetOnce actionCompleted = new SetOnce<>(); - step.performAction(indexMetaData, clusterState, new Listener() { + step.performAction(indexMetaData, clusterState, null, new Listener() { @Override public void onResponse(boolean complete) { @@ -407,7 +407,7 @@ private void assertNoValidNode(IndexMetaData indexMetaData, Index index, Discove SetOnce actionCompleted = new SetOnce<>(); - step.performAction(indexMetaData, clusterState, new Listener() { + step.performAction(indexMetaData, clusterState, null, new Listener() { @Override public void onResponse(boolean complete) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStepTests.java index a5c0e4d7146bc..d1911000f6786 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStepTests.java @@ -119,7 +119,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).aliases(Mockito.any(), Mockito.any()); SetOnce actionCompleted = new SetOnce<>(); - step.performAction(indexMetaData, null, new Listener() { + step.performAction(indexMetaData, null, null, new Listener() { @Override public void onResponse(boolean complete) { @@ -163,7 +163,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).aliases(Mockito.any(), Mockito.any()); SetOnce exceptionThrown = new SetOnce<>(); - step.performAction(indexMetaData, null, new Listener() { + step.performAction(indexMetaData, null, null, new Listener() { @Override public void onResponse(boolean complete) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStepTests.java index 0cd655cb9d60f..e9ceccc4e279c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStepTests.java @@ -127,7 +127,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any()); SetOnce actionCompleted = new SetOnce<>(); - step.performAction(sourceIndexMetaData, null, new Listener() { + step.performAction(sourceIndexMetaData, null, null, new Listener() { @Override public void onResponse(boolean complete) { @@ -173,7 +173,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any()); SetOnce actionCompleted = new SetOnce<>(); - step.performAction(indexMetaData, null, new Listener() { + step.performAction(indexMetaData, null, null, new Listener() { @Override public void onResponse(boolean complete) { @@ -220,7 +220,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any()); SetOnce exceptionThrown = new SetOnce<>(); - step.performAction(indexMetaData, null, new Listener() { + step.performAction(indexMetaData, null, null, new Listener() { @Override public void onResponse(boolean complete) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStepTests.java index 58558c92d2511..e92f1dce6a477 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStepTests.java @@ -56,7 +56,7 @@ public void testUnFollow() { Boolean[] completed = new Boolean[1]; Exception[] failure = new Exception[1]; UnfollowFollowIndexStep step = new UnfollowFollowIndexStep(randomStepKey(), randomStepKey(), client); - step.performAction(indexMetadata, null, new AsyncActionStep.Listener() { + step.performAction(indexMetadata, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { completed[0] = complete; @@ -98,7 +98,7 @@ public void testUnFollowUnfollowFailed() { Boolean[] completed = new Boolean[1]; Exception[] failure = new Exception[1]; UnfollowFollowIndexStep step = new UnfollowFollowIndexStep(randomStepKey(), randomStepKey(), client); - step.performAction(indexMetadata, null, new AsyncActionStep.Listener() { + step.performAction(indexMetadata, null, null, new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { completed[0] = complete; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStepTests.java index 22908146af21f..28683fa86291c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStepTests.java @@ -100,7 +100,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { SetOnce actionCompleted = new SetOnce<>(); - step.performAction(indexMetaData, null, new Listener() { + step.performAction(indexMetaData, null, null, new Listener() { @Override public void onResponse(boolean complete) { @@ -147,7 +147,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).updateSettings(Mockito.any(), Mockito.any()); SetOnce exceptionThrown = new SetOnce<>(); - step.performAction(indexMetaData, null, new Listener() { + step.performAction(indexMetaData, null, null, new Listener() { @Override public void onResponse(boolean complete) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index cb2f13e804253..788870013885e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -48,15 +48,15 @@ import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig.Mode; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.TimeZone; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -449,7 +449,7 @@ public void testBuild_GivenHistogramWithDefaultInterval() { public void testBuild_GivenDateHistogramWithInvalidTimeZone() { MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("bucket").field("time") - .interval(300000L).timeZone(DateTimeZone.forTimeZone(TimeZone.getTimeZone("EST"))).subAggregation(maxTime); + .interval(300000L).timeZone(ZoneId.of("CET")).subAggregation(maxTime); ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> createDatafeedWithDateHistogram(dateHistogram)); @@ -650,7 +650,7 @@ public void testSerializationOfComplexAggs() throws IOException { new Script("params.bytes > 0 ? params.bytes : null")); DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("histogram_buckets") - .field("timestamp").interval(300000).timeZone(DateTimeZone.UTC) + .field("timestamp").interval(300000).timeZone(ZoneOffset.UTC) .subAggregation(maxTime) .subAggregation(avgAggregationBuilder) .subAggregation(derivativePipelineAggregationBuilder) @@ -701,7 +701,7 @@ public void testSerializationOfComplexAggsBetweenVersions() throws IOException { new Script("params.bytes > 0 ? params.bytes : null")); DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("histogram_buckets") - .field("timestamp").interval(300000).timeZone(DateTimeZone.UTC) + .field("timestamp").interval(300000).timeZone(ZoneOffset.UTC) .subAggregation(maxTime) .subAggregation(avgAggregationBuilder) .subAggregation(derivativePipelineAggregationBuilder) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java index 7770def0fae9a..532468216e5aa 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java @@ -14,11 +14,12 @@ import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; -import java.util.TimeZone; +import java.time.ZoneId; +import java.time.ZoneOffset; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class ExtractorUtilsTests extends ESTestCase { @@ -73,13 +74,21 @@ public void testGetHistogramAggregation_MissingHistogramAgg() { public void testGetHistogramIntervalMillis_GivenDateHistogramWithInvalidTimeZone() { MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("bucket").field("time") - .interval(300000L).timeZone(DateTimeZone.forTimeZone(TimeZone.getTimeZone("EST"))).subAggregation(maxTime); + .interval(300000L).timeZone(ZoneId.of("CET")).subAggregation(maxTime); ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> ExtractorUtils.getHistogramIntervalMillis(dateHistogram)); assertThat(e.getMessage(), equalTo("ML requires date_histogram.time_zone to be UTC")); } + public void testGetHistogramIntervalMillis_GivenUtcTimeZones() { + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + ZoneId zone = randomFrom(ZoneOffset.UTC, ZoneId.of("UTC")); + DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("bucket").field("time") + .interval(300000L).timeZone(zone).subAggregation(maxTime); + assertThat(ExtractorUtils.getHistogramIntervalMillis(dateHistogram), is(300_000L)); + } + public void testIsHistogram() { assertTrue(ExtractorUtils.isHistogram(AggregationBuilders.dateHistogram("time"))); assertTrue(ExtractorUtils.isHistogram(AggregationBuilders.histogram("time"))); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java index e4ce536a3ccf6..e87515afadd1d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java @@ -9,10 +9,18 @@ import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; @@ -30,6 +38,8 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -128,6 +138,96 @@ public void testTermFieldMapping() throws IOException { assertNull(instanceMapping); } + + public void testMappingRequiresUpdateNoMapping() throws IOException { + ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); + ClusterState cs = csBuilder.build(); + String[] indices = new String[] { "no_index" }; + + assertArrayEquals(new String[] { "no_index" }, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateNullMapping() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("null_mapping", null)); + String[] indices = new String[] { "null_index" }; + assertArrayEquals(indices, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateNoVersion() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("no_version_field", "NO_VERSION_FIELD")); + String[] indices = new String[] { "no_version_field" }; + assertArrayEquals(indices, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateRecentMappingVersion() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_current", Version.CURRENT.toString())); + String[] indices = new String[] { "version_current" }; + assertArrayEquals(new String[] {}, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateMaliciousMappingVersion() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData( + Collections.singletonMap("version_current", Collections.singletonMap("nested", "1.0"))); + String[] indices = new String[] { "version_nested" }; + assertArrayEquals(indices, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateBogusMappingVersion() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_bogus", "0.0")); + String[] indices = new String[] { "version_bogus" }; + assertArrayEquals(indices, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateNewerMappingVersion() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_newer", Version.CURRENT)); + String[] indices = new String[] { "version_newer" }; + assertArrayEquals(new String[] {}, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousVersion())); + } + + public void testMappingRequiresUpdateNewerMappingVersionMinor() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_newer_minor", Version.CURRENT)); + String[] indices = new String[] { "version_newer_minor" }; + assertArrayEquals(new String[] {}, + ElasticsearchMappings.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousMinorVersion())); + } + + + private ClusterState getClusterStateWithMappingsWithMetaData(Map namesAndVersions) throws IOException { + MetaData.Builder metaDataBuilder = MetaData.builder(); + + for (Map.Entry entry : namesAndVersions.entrySet()) { + + String indexName = entry.getKey(); + Object version = entry.getValue(); + + IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName); + indexMetaData.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + + Map mapping = new HashMap<>(); + Map properties = new HashMap<>(); + for (int i = 0; i < 10; i++) { + properties.put("field" + i, Collections.singletonMap("type", "string")); + } + mapping.put("properties", properties); + + Map meta = new HashMap<>(); + if (version != null && version.equals("NO_VERSION_FIELD") == false) { + meta.put("version", version); + } + mapping.put("_meta", meta); + + indexMetaData.putMapping(new MappingMetaData(ElasticsearchMappings.DOC_TYPE, mapping)); + + metaDataBuilder.put(indexMetaData); + } + MetaData metaData = metaDataBuilder.build(); + + ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); + csBuilder.metaData(metaData); + return csBuilder.build(); + } + private Set collectResultsDocFieldNames() throws IOException { // Only the mappings for the results index should be added below. Do NOT add mappings for other indexes here. return collectFieldNames(ElasticsearchMappings.resultsMapping()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java index d892eb550a17a..605ea6e901a90 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java @@ -28,7 +28,7 @@ import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween; import static com.carrotsearch.randomizedtesting.generators.RandomPicks.randomFrom; import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiAlphanumOfLengthBetween; -import static org.elasticsearch.test.ESTestCase.randomDateTimeZone; +import static org.elasticsearch.test.ESTestCase.randomZone; public class ConfigTestHelpers { @@ -71,7 +71,7 @@ public static DateHistogramGroupConfig randomDateHistogramGroupConfig(final Rand final String field = randomField(random); final DateHistogramInterval interval = randomInterval(); final DateHistogramInterval delay = random.nextBoolean() ? randomInterval() : null; - final String timezone = random.nextBoolean() ? randomDateTimeZone().toString() : null; + String timezone = random.nextBoolean() ? randomZone().getId() : null; return new DateHistogramGroupConfig(field, interval, delay, timezone); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java index 415e1a00a60cf..95df682ff5e14 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java @@ -14,9 +14,9 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -155,28 +155,28 @@ public void testBwcSerialization() throws IOException { DateHistogramInterval interval = new DateHistogramInterval(in); String field = in.readString(); DateHistogramInterval delay = in.readOptionalWriteable(DateHistogramInterval::new); - DateTimeZone timeZone = in.readTimeZone(); + ZoneId timeZone = in.readZoneId(); - assertEqualInstances(reference, new DateHistogramGroupConfig(field, interval, delay, timeZone.getID())); + assertEqualInstances(reference, new DateHistogramGroupConfig(field, interval, delay, timeZone.getId())); } for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { final String field = ConfigTestHelpers.randomField(random()); final DateHistogramInterval interval = ConfigTestHelpers.randomInterval(); final DateHistogramInterval delay = randomBoolean() ? ConfigTestHelpers.randomInterval() : null; - final DateTimeZone timezone = randomDateTimeZone(); + final ZoneId timezone = randomZone(); // previous way to serialize a DateHistogramGroupConfig final BytesStreamOutput out = new BytesStreamOutput(); interval.writeTo(out); out.writeString(field); out.writeOptionalWriteable(delay); - out.writeTimeZone(timezone); + out.writeZoneId(timezone); final StreamInput in = out.bytes().streamInput(); DateHistogramGroupConfig deserialized = new DateHistogramGroupConfig(in); - assertEqualInstances(new DateHistogramGroupConfig(field, interval, delay, timezone.getID()), deserialized); + assertEqualInstances(new DateHistogramGroupConfig(field, interval, delay, timezone.getId()), deserialized); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java index 62dd08d6bd486..f642f3ab919d2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java @@ -60,7 +60,7 @@ public void testSerialization() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); if (randomBoolean()) { - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_7_0, Version.CURRENT); logger.info("Serializing with version {}", version); out.setVersion(version); } @@ -75,7 +75,32 @@ public void testSerialization() throws IOException { assertThat(copy.roleDescriptor(), equalTo(original.roleDescriptor())); } - public void testSerializationV63AndBefore() throws IOException { + public void testSerializationBetweenV64AndV66() throws IOException { + final PutRoleRequest original = buildRandomRequest(); + + final BytesStreamOutput out = new BytesStreamOutput(); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.V_6_6_0); + out.setVersion(version); + original.writeTo(out); + + final PutRoleRequest copy = new PutRoleRequest(); + final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); + StreamInput in = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(out.bytes())), registry); + in.setVersion(version); + copy.readFrom(in); + + assertThat(copy.name(), equalTo(original.name())); + assertThat(copy.cluster(), equalTo(original.cluster())); + assertIndicesSerializedRestricted(copy.indices(), original.indices()); + assertThat(copy.runAs(), equalTo(original.runAs())); + assertThat(copy.metadata(), equalTo(original.metadata())); + assertThat(copy.getRefreshPolicy(), equalTo(original.getRefreshPolicy())); + + assertThat(copy.applicationPrivileges(), equalTo(original.applicationPrivileges())); + assertThat(copy.conditionalClusterPrivileges(), equalTo(original.conditionalClusterPrivileges())); + } + + public void testSerializationV60AndV32() throws IOException { final PutRoleRequest original = buildRandomRequest(); final BytesStreamOutput out = new BytesStreamOutput(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 7ad03846c1d7e..5a567ad13ff80 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -312,20 +312,6 @@ public void testKibanaUserRole() { assertThat(kibanaUserRole.indices().allowedIndicesMatcher("indices:foo") .test(randomAlphaOfLengthBetween(8, 24)), is(false)); - Arrays.asList(".kibana", ".kibana-devnull").forEach((index) -> { - logger.info("index name [{}]", index); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher("indices:foo").test(index), is(false)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher("indices:bar").test(index), is(false)); - - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(true)); - }); - final String randomApplication = "kibana-" + randomAlphaOfLengthBetween(8, 24); assertThat(kibanaUserRole.application().grants(new ApplicationPrivilege(randomApplication, "app-random", "all"), "*"), is(false)); @@ -569,19 +555,6 @@ public void testKibanaDashboardOnlyUserRole() { assertThat(dashboardsOnlyUserRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); - final String index = ".kibana"; - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher("indices:foo").test(index), is(false)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher("indices:bar").test(index), is(false)); - - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(false)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(false)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(false)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(false)); - - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(index), is(true)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); - final String randomApplication = "kibana-" + randomAlphaOfLengthBetween(8, 24); assertThat(dashboardsOnlyUserRole.application().grants(new ApplicationPrivilege(randomApplication, "app-random", "all"), "*"), is(false)); diff --git a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle index 59df733892944..34b7cf9e44c58 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle @@ -4,6 +4,8 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(':x-pack:plugin:ccr:qa') + testCompile project(':x-pack:plugin:core') + testCompile project(':x-pack:plugin:ilm') } task leaderClusterTest(type: RestIntegTestTask) { @@ -25,6 +27,8 @@ leaderClusterTestCluster { leaderClusterTestRunner { systemProperty 'tests.target_cluster', 'leader' + /* To support taking index snapshots, we have to set path.repo setting */ + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") } task followClusterTest(type: RestIntegTestTask) {} @@ -47,6 +51,8 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'tests.target_cluster', 'follow' systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + /* To support taking index snapshots, we have to set path.repo setting */ + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") finalizedBy 'leaderClusterTestCluster#stop' } diff --git a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java index 797916c7c405f..9dbf32b376565 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java @@ -5,20 +5,34 @@ */ package org.elasticsearch.xpack.indexlifecycle; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.xpack.ccr.ESCCRRestTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.UnfollowAction; import java.io.IOException; +import java.io.InputStream; +import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; +import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -89,6 +103,77 @@ public void testBasicCCRAndILMIntegration() throws Exception { } } + public void testCCRUnfollowDuringSnapshot() throws Exception { + String indexName = "unfollow-test-index"; + if ("leader".equals(targetCluster)) { + Settings indexSettings = Settings.builder() + .put("index.soft_deletes.enabled", true) + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 0) + .build(); + createIndex(indexName, indexSettings); + ensureGreen(indexName); + } else if ("follow".equals(targetCluster)) { + createNewSingletonPolicy("unfollow-only", "hot", new UnfollowAction(), TimeValue.ZERO); + followIndex(indexName, indexName); + + // Create the repository before taking the snapshot. + Request request = new Request("PUT", "/_snapshot/repo"); + request.setJsonEntity(Strings + .toString(JsonXContent.contentBuilder() + .startObject() + .field("type", "fs") + .startObject("settings") + .field("compress", randomBoolean()) + .field("location", System.getProperty("tests.path.repo")) + .field("max_snapshot_bytes_per_sec", "256b") + .endObject() + .endObject())); + assertOK(client().performRequest(request)); + + try (RestClient leaderClient = buildLeaderClient()) { + index(leaderClient, indexName, "1"); + assertDocumentExists(leaderClient, indexName, "1"); + + updateIndexSettings(leaderClient, indexName, Settings.builder() + .put("index.lifecycle.indexing_complete", true) + .build()); + + // start snapshot + request = new Request("PUT", "/_snapshot/repo/snapshot"); + request.addParameter("wait_for_completion", "false"); + request.setJsonEntity("{\"indices\": \"" + indexName + "\"}"); + assertOK(client().performRequest(request)); + + // add policy and expect it to trigger unfollow immediately (while snapshot in progress) + logger.info("--> starting unfollow"); + updatePolicy(indexName, "unfollow-only"); + + assertBusy(() -> { + // Ensure that 'index.lifecycle.indexing_complete' is replicated: + assertThat(getIndexSetting(leaderClient, indexName, "index.lifecycle.indexing_complete"), equalTo("true")); + assertThat(getIndexSetting(client(), indexName, "index.lifecycle.indexing_complete"), equalTo("true")); + // ILM should have unfollowed the follower index, so the following_index setting should have been removed: + // (this controls whether the follow engine is used) + assertThat(getIndexSetting(client(), indexName, "index.xpack.ccr.following_index"), nullValue()); + // Following index should have the document + assertDocumentExists(client(), indexName, "1"); + // ILM should have completed the unfollow + assertILMPolicy(client(), indexName, "unfollow-only", "completed"); + }, 2, TimeUnit.MINUTES); + + // assert that snapshot succeeded + assertThat(getSnapshotState("snapshot"), equalTo("SUCCESS")); + assertOK(client().performRequest(new Request("DELETE", "/_snapshot/repo/snapshot"))); + ResponseException e = expectThrows(ResponseException.class, + () -> client().performRequest(new Request("GET", "/_snapshot/repo/snapshot"))); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + } + } else { + fail("unexpected target cluster [" + targetCluster + "]"); + } + } + public void testCcrAndIlmWithRollover() throws Exception { String alias = "metrics"; String indexName = "metrics-000001"; @@ -282,4 +367,35 @@ private static void assertDocumentExists(RestClient client, String index, String assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); } + private void createNewSingletonPolicy(String policyName, String phaseName, LifecycleAction action, TimeValue after) throws IOException { + Phase phase = new Phase(phaseName, after, singletonMap(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, singletonMap(phase.getName(), phase)); + XContentBuilder builder = jsonBuilder(); + lifecyclePolicy.toXContent(builder, null); + final StringEntity entity = new StringEntity( + "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); + Request request = new Request("PUT", "_ilm/policy/" + policyName); + request.setEntity(entity); + client().performRequest(request); + } + + public static void updatePolicy(String indexName, String policy) throws IOException { + + Request changePolicyRequest = new Request("PUT", "/" + indexName + "/_settings"); + final StringEntity changePolicyEntity = new StringEntity("{ \"index.lifecycle.name\": \"" + policy + "\" }", + ContentType.APPLICATION_JSON); + changePolicyRequest.setEntity(changePolicyEntity); + assertOK(client().performRequest(changePolicyRequest)); + } + + private String getSnapshotState(String snapshot) throws IOException { + Response response = client().performRequest(new Request("GET", "/_snapshot/repo/" + snapshot)); + Map responseMap; + try (InputStream is = response.getEntity().getContent()) { + responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + @SuppressWarnings("unchecked") Map snapResponse = ((List>) responseMap.get("snapshots")).get(0); + assertThat(snapResponse.get("snapshot"), equalTo(snapshot)); + return (String) snapResponse.get("state"); + } } diff --git a/x-pack/plugin/ilm/qa/multi-node/build.gradle b/x-pack/plugin/ilm/qa/multi-node/build.gradle index edd7f3aad472e..5f033626932df 100644 --- a/x-pack/plugin/ilm/qa/multi-node/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-node/build.gradle @@ -5,6 +5,11 @@ dependencies { testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') } +integTestRunner { + /* To support taking index snapshots, we have to set path.repo setting */ + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") +} + integTestCluster { numNodes = 4 clusterName = 'ilm' @@ -16,5 +21,4 @@ integTestCluster { setting 'xpack.ml.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' setting 'indices.lifecycle.poll_interval', '1000ms' - } diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java index 01eba362711b3..878ddec5e5fe9 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.FrozenEngine; import org.elasticsearch.test.rest.ESRestTestCase; @@ -358,6 +359,44 @@ public void testDeleteOnlyShouldNotMakeIndexReadonly() throws Exception { indexDocument(); } + public void testDeleteDuringSnapshot() throws Exception { + // Create the repository before taking the snapshot. + Request request = new Request("PUT", "/_snapshot/repo"); + request.setJsonEntity(Strings + .toString(JsonXContent.contentBuilder() + .startObject() + .field("type", "fs") + .startObject("settings") + .field("compress", randomBoolean()) + .field("location", System.getProperty("tests.path.repo")) + .field("max_snapshot_bytes_per_sec", "256b") + .endObject() + .endObject())); + assertOK(client().performRequest(request)); + // create delete policy + createNewSingletonPolicy("delete", new DeleteAction(), TimeValue.timeValueMillis(0)); + // create index without policy + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + // index document so snapshot actually does something + indexDocument(); + // start snapshot + request = new Request("PUT", "/_snapshot/repo/snapshot"); + request.addParameter("wait_for_completion", "false"); + request.setJsonEntity("{\"indices\": \"" + index + "\"}"); + assertOK(client().performRequest(request)); + // add policy and expect it to trigger delete immediately (while snapshot in progress) + updatePolicy(index, policy); + // assert that index was deleted + assertBusy(() -> assertFalse(indexExists(index)), 2, TimeUnit.MINUTES); + // assert that snapshot is still in progress and clean up + assertThat(getSnapshotState("snapshot"), equalTo("SUCCESS")); + assertOK(client().performRequest(new Request("DELETE", "/_snapshot/repo/snapshot"))); + ResponseException e = expectThrows(ResponseException.class, + () -> client().performRequest(new Request("GET", "/_snapshot/repo/snapshot"))); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + } + public void testReadOnly() throws Exception { createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); @@ -427,6 +466,56 @@ public void testShrinkAction() throws Exception { expectThrows(ResponseException.class, this::indexDocument); } + public void testShrinkDuringSnapshot() throws Exception { + String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index; + // Create the repository before taking the snapshot. + Request request = new Request("PUT", "/_snapshot/repo"); + request.setJsonEntity(Strings + .toString(JsonXContent.contentBuilder() + .startObject() + .field("type", "fs") + .startObject("settings") + .field("compress", randomBoolean()) + .field("location", System.getProperty("tests.path.repo")) + .field("max_snapshot_bytes_per_sec", "256b") + .endObject() + .endObject())); + assertOK(client().performRequest(request)); + // create delete policy + createNewSingletonPolicy("warm", new ShrinkAction(1), TimeValue.timeValueMillis(0)); + // create index without policy + createIndexWithSettings(index, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + // required so the shrink doesn't wait on SetSingleNodeAllocateStep + .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_name", "node-0")); + // index document so snapshot actually does something + indexDocument(); + // start snapshot + request = new Request("PUT", "/_snapshot/repo/snapshot"); + request.addParameter("wait_for_completion", "false"); + request.setJsonEntity("{\"indices\": \"" + index + "\"}"); + assertOK(client().performRequest(request)); + // add policy and expect it to trigger shrink immediately (while snapshot in progress) + updatePolicy(index, policy); + // assert that index was shrunk and original index was deleted + assertBusy(() -> { + assertTrue(indexExists(shrunkenIndex)); + assertTrue(aliasExists(shrunkenIndex, index)); + Map settings = getOnlyIndexSettings(shrunkenIndex); + assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY)); + assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(1))); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + }, 2, TimeUnit.MINUTES); + expectThrows(ResponseException.class, this::indexDocument); + // assert that snapshot succeeded + assertThat(getSnapshotState("snapshot"), equalTo("SUCCESS")); + assertOK(client().performRequest(new Request("DELETE", "/_snapshot/repo/snapshot"))); + ResponseException e = expectThrows(ResponseException.class, + () -> client().performRequest(new Request("GET", "/_snapshot/repo/snapshot"))); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + } + public void testFreezeAction() throws Exception { createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); @@ -441,6 +530,50 @@ public void testFreezeAction() throws Exception { }); } + public void testFreezeDuringSnapshot() throws Exception { + // Create the repository before taking the snapshot. + Request request = new Request("PUT", "/_snapshot/repo"); + request.setJsonEntity(Strings + .toString(JsonXContent.contentBuilder() + .startObject() + .field("type", "fs") + .startObject("settings") + .field("compress", randomBoolean()) + .field("location", System.getProperty("tests.path.repo")) + .field("max_snapshot_bytes_per_sec", "256b") + .endObject() + .endObject())); + assertOK(client().performRequest(request)); + // create delete policy + createNewSingletonPolicy("cold", new FreezeAction(), TimeValue.timeValueMillis(0)); + // create index without policy + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + // index document so snapshot actually does something + indexDocument(); + // start snapshot + request = new Request("PUT", "/_snapshot/repo/snapshot"); + request.addParameter("wait_for_completion", "false"); + request.setJsonEntity("{\"indices\": \"" + index + "\"}"); + assertOK(client().performRequest(request)); + // add policy and expect it to trigger delete immediately (while snapshot in progress) + updatePolicy(index, policy); + // assert that the index froze + assertBusy(() -> { + Map settings = getOnlyIndexSettings(index); + assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + assertThat(settings.get(IndexSettings.INDEX_SEARCH_THROTTLED.getKey()), equalTo("true")); + assertThat(settings.get(FrozenEngine.INDEX_FROZEN.getKey()), equalTo("true")); + }, 2, TimeUnit.MINUTES); + // assert that snapshot is still in progress and clean up + assertThat(getSnapshotState("snapshot"), equalTo("SUCCESS")); + assertOK(client().performRequest(new Request("DELETE", "/_snapshot/repo/snapshot"))); + ResponseException e = expectThrows(ResponseException.class, + () -> client().performRequest(new Request("GET", "/_snapshot/repo/snapshot"))); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + } + public void testSetPriority() throws Exception { createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.INDEX_PRIORITY_SETTING.getKey(), 100)); @@ -763,4 +896,15 @@ private void indexDocument() throws IOException { Response response = client().performRequest(indexRequest); logger.info(response.getStatusLine()); } + + private String getSnapshotState(String snapshot) throws IOException { + Response response = client().performRequest(new Request("GET", "/_snapshot/repo/" + snapshot)); + Map responseMap; + try (InputStream is = response.getEntity().getContent()) { + responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + Map snapResponse = ((List>) responseMap.get("snapshots")).get(0); + assertThat(snapResponse.get("snapshot"), equalTo(snapshot)); + return (String) snapResponse.get("state"); + } } diff --git a/x-pack/plugin/ilm/qa/rest/build.gradle b/x-pack/plugin/ilm/qa/rest/build.gradle index ff9bf98bce5f5..7a79d1c20d936 100644 --- a/x-pack/plugin/ilm/qa/rest/build.gradle +++ b/x-pack/plugin/ilm/qa/rest/build.gradle @@ -21,7 +21,7 @@ restTestRunner { } restTestCluster { - distribution 'zip' + distribution 'default' setting 'xpack.ilm.enabled', 'true' setting 'xpack.ml.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java index baa1d8facd958..2e7d2fbbc555d 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java @@ -135,8 +135,8 @@ public Collection createComponents(Client client, ClusterService cluster if (enabled == false || transportClientMode) { return emptyList(); } - indexLifecycleInitialisationService - .set(new IndexLifecycleService(settings, client, clusterService, getClock(), System::currentTimeMillis, xContentRegistry)); + indexLifecycleInitialisationService.set(new IndexLifecycleService(settings, client, clusterService, threadPool, + getClock(), System::currentTimeMillis, xContentRegistry)); return Collections.singletonList(indexLifecycleInitialisationService.get()); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java index 7ba7bcafe55f8..f6c068d945d79 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; @@ -23,6 +24,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.Index; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep; import org.elasticsearch.xpack.core.indexlifecycle.AsyncWaitStep; import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateActionStep; @@ -53,14 +55,17 @@ public class IndexLifecycleRunner { private static final Logger logger = LogManager.getLogger(IndexLifecycleRunner.class); private static final ToXContent.Params STACKTRACE_PARAMS = new ToXContent.MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false")); + private final ThreadPool threadPool; private PolicyStepsRegistry stepRegistry; private ClusterService clusterService; private LongSupplier nowSupplier; - public IndexLifecycleRunner(PolicyStepsRegistry stepRegistry, ClusterService clusterService, LongSupplier nowSupplier) { + public IndexLifecycleRunner(PolicyStepsRegistry stepRegistry, ClusterService clusterService, + ThreadPool threadPool, LongSupplier nowSupplier) { this.stepRegistry = stepRegistry; this.clusterService = clusterService; this.nowSupplier = nowSupplier; + this.threadPool = threadPool; } /** @@ -168,7 +173,8 @@ public void maybeRunAsyncAction(ClusterState currentState, IndexMetaData indexMe } if (currentStep instanceof AsyncActionStep) { logger.debug("[{}] running policy with async action step [{}]", index, currentStep.getKey()); - ((AsyncActionStep) currentStep).performAction(indexMetaData, currentState, new AsyncActionStep.Listener() { + ((AsyncActionStep) currentStep).performAction(indexMetaData, currentState, + new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()), new AsyncActionStep.Listener() { @Override public void onResponse(boolean complete) { diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java index b77997a94a3d6..d143e80340cc0 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java @@ -53,13 +53,14 @@ public class IndexLifecycleService private final PolicyStepsRegistry policyRegistry; private final IndexLifecycleRunner lifecycleRunner; private final Settings settings; + private final ThreadPool threadPool; private Client client; private ClusterService clusterService; private LongSupplier nowSupplier; private SchedulerEngine.Job scheduledJob; - public IndexLifecycleService(Settings settings, Client client, ClusterService clusterService, Clock clock, LongSupplier nowSupplier, - NamedXContentRegistry xContentRegistry) { + public IndexLifecycleService(Settings settings, Client client, ClusterService clusterService, ThreadPool threadPool, Clock clock, + LongSupplier nowSupplier, NamedXContentRegistry xContentRegistry) { super(); this.settings = settings; this.client = client; @@ -68,7 +69,8 @@ public IndexLifecycleService(Settings settings, Client client, ClusterService cl this.nowSupplier = nowSupplier; this.scheduledJob = null; this.policyRegistry = new PolicyStepsRegistry(xContentRegistry, client); - this.lifecycleRunner = new IndexLifecycleRunner(policyRegistry, clusterService, nowSupplier); + this.threadPool = threadPool; + this.lifecycleRunner = new IndexLifecycleRunner(policyRegistry, clusterService, threadPool, nowSupplier); this.pollInterval = LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING.get(settings); clusterService.addStateApplier(this); clusterService.addListener(this); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java index 586c3c683264e..aad85426fc338 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java @@ -32,11 +32,12 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { String lifecycleName = restRequest.param("name"); - XContentParser parser = restRequest.contentParser(); - PutLifecycleAction.Request putLifecycleRequest = PutLifecycleAction.Request.parseRequest(lifecycleName, parser); - putLifecycleRequest.timeout(restRequest.paramAsTime("timeout", putLifecycleRequest.timeout())); - putLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putLifecycleRequest.masterNodeTimeout())); + try (XContentParser parser = restRequest.contentParser()) { + PutLifecycleAction.Request putLifecycleRequest = PutLifecycleAction.Request.parseRequest(lifecycleName, parser); + putLifecycleRequest.timeout(restRequest.paramAsTime("timeout", putLifecycleRequest.timeout())); + putLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putLifecycleRequest.masterNodeTimeout())); - return channel -> client.execute(PutLifecycleAction.INSTANCE, putLifecycleRequest, new RestToXContentListener<>(channel)); + return channel -> client.execute(PutLifecycleAction.INSTANCE, putLifecycleRequest, new RestToXContentListener<>(channel)); + } } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java index da51bb68962d7..61f9be3558fa7 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java @@ -87,7 +87,12 @@ public ClusterState execute(ClusterState currentState) throws Exception { SortedMap newPolicies = new TreeMap<>(currentMetadata.getPolicyMetadatas()); LifecyclePolicyMetadata lifecyclePolicyMetadata = new LifecyclePolicyMetadata(request.getPolicy(), filteredHeaders, nextVersion, Instant.now().toEpochMilli()); - newPolicies.put(lifecyclePolicyMetadata.getName(), lifecyclePolicyMetadata); + LifecyclePolicyMetadata oldPolicy = newPolicies.put(lifecyclePolicyMetadata.getName(), lifecyclePolicyMetadata); + if (oldPolicy == null) { + logger.info("adding index lifecycle policy [{}]", request.getPolicy().getName()); + } else { + logger.info("updating index lifecycle policy [{}]", request.getPolicy().getName()); + } IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, OperationMode.RUNNING); newState.metaData(MetaData.builder(currentState.getMetaData()) .putCustom(IndexLifecycleMetadata.TYPE, newMetadata).build()); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java index f4ab15a30e880..565a9723c8c6c 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -52,6 +53,8 @@ import org.elasticsearch.xpack.core.indexlifecycle.Step; import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.junit.After; +import org.junit.Before; import org.mockito.ArgumentMatcher; import org.mockito.Mockito; @@ -80,6 +83,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase { private static final NamedXContentRegistry REGISTRY; + private ThreadPool threadPool; static { try (IndexLifecycle indexLifecycle = new IndexLifecycle(Settings.EMPTY)) { @@ -88,6 +92,16 @@ public class IndexLifecycleRunnerTests extends ESTestCase { } } + @Before + public void prepareThreadPool() { + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() { + threadPool.shutdownNow(); + } + /** A real policy steps registry where getStep can be overridden so that JSON doesn't have to be parsed */ private class MockPolicyStepsRegistry extends PolicyStepsRegistry { private BiFunction fn = null; @@ -142,7 +156,7 @@ public void testRunPolicyTerminalPolicyStep() { TerminalPolicyStep step = TerminalPolicyStep.INSTANCE; PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); ClusterService clusterService = mock(ClusterService.class); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> 0L); IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); @@ -157,7 +171,7 @@ public void testRunPolicyErrorStep() { MockClusterStateWaitStep step = new MockClusterStateWaitStep(stepKey, null); PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); ClusterService clusterService = mock(ClusterService.class); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> 0L); LifecycleExecutionState.Builder newState = LifecycleExecutionState.builder(); newState.setPhase(stepKey.getPhase()); newState.setAction(stepKey.getAction()); @@ -197,7 +211,7 @@ public void testRunStateChangePolicyWithNoNextStep() throws Exception { .localNodeId(node.getId())) .build(); ClusterServiceUtils.setState(clusterService, state); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> 0L); ClusterState before = clusterService.state(); CountDownLatch latch = new CountDownLatch(1); @@ -258,7 +272,7 @@ public void testRunStateChangePolicyWithNextStep() throws Exception { .build(); ClusterServiceUtils.setState(clusterService, state); long stepTime = randomLong(); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> stepTime); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> stepTime); ClusterState before = clusterService.state(); CountDownLatch latch = new CountDownLatch(1); @@ -307,7 +321,7 @@ public void testRunAsyncActionDoesNotRun() { .localNodeId(node.getId())) .build(); ClusterServiceUtils.setState(clusterService, state); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> 0L); ClusterState before = clusterService.state(); // State changes should not run AsyncAction steps @@ -366,7 +380,7 @@ public void testRunStateChangePolicyWithAsyncActionNextStep() throws Exception { .build(); logger.info("--> state: {}", state); ClusterServiceUtils.setState(clusterService, state); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> 0L); ClusterState before = clusterService.state(); CountDownLatch latch = new CountDownLatch(1); @@ -435,7 +449,7 @@ public void testRunPeriodicStep() throws Exception { .build(); logger.info("--> state: {}", state); ClusterServiceUtils.setState(clusterService, state); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> 0L); ClusterState before = clusterService.state(); CountDownLatch latch = new CountDownLatch(1); @@ -458,7 +472,7 @@ public void testRunPolicyClusterStateActionStep() { MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, null); PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); ClusterService clusterService = mock(ClusterService.class); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> 0L); IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); @@ -476,7 +490,7 @@ public void testRunPolicyClusterStateWaitStep() { step.setWillComplete(true); PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); ClusterService clusterService = mock(ClusterService.class); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> 0L); IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); @@ -495,7 +509,7 @@ public void testRunPolicyAsyncActionStepClusterStateChangeIgnored() { step.setException(expectedException); PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); ClusterService clusterService = mock(ClusterService.class); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> 0L); IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); @@ -513,7 +527,7 @@ public void testRunPolicyAsyncWaitStepClusterStateChangeIgnored() { step.setException(expectedException); PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); ClusterService clusterService = mock(ClusterService.class); - IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, threadPool, () -> 0L); IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); @@ -527,7 +541,7 @@ public void testRunPolicyThatDoesntExist() { String policyName = "cluster_state_action_policy"; ClusterService clusterService = mock(ClusterService.class); IndexLifecycleRunner runner = new IndexLifecycleRunner(new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, null), - clusterService, () -> 0L); + clusterService, threadPool, () -> 0L); IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); // verify that no exception is thrown @@ -889,7 +903,7 @@ public void testMoveClusterStateToFailedStep() { ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.singletonList(policyMetadata)); Index index = clusterState.metaData().index(indexName).getIndex(); - IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, threadPool, () -> now); ClusterState nextClusterState = runner.moveClusterStateToFailedStep(clusterState, indices); IndexLifecycleRunnerTests.assertClusterStateOnNextStep(clusterState, index, errorStepKey, failedStepKey, nextClusterState, now); @@ -923,7 +937,7 @@ public void testMoveClusterStateToFailedStepWithUnknownStep() { lifecycleState.setFailedStep(failedStepKey.getName()); ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.singletonList(policyMetadata)); - IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, threadPool, () -> now); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> runner.moveClusterStateToFailedStep(clusterState, indices)); assertThat(exception.getMessage(), equalTo("step [" + failedStepKey @@ -935,7 +949,7 @@ public void testMoveClusterStateToFailedStepIndexNotFound() { String invalidIndexName = "does_not_exist"; ClusterState clusterState = buildClusterState(existingIndexName, Settings.builder(), LifecycleExecutionState.builder().build(), Collections.emptyList()); - IndexLifecycleRunner runner = new IndexLifecycleRunner(null, null, () -> 0L); + IndexLifecycleRunner runner = new IndexLifecycleRunner(null, null, threadPool, () -> 0L); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> runner.moveClusterStateToFailedStep(clusterState, new String[] { invalidIndexName })); assertThat(exception.getMessage(), equalTo("index [" + invalidIndexName + "] does not exist")); @@ -958,7 +972,7 @@ public void testMoveClusterStateToFailedStepInvalidPolicySetting() { lifecycleState.setStep(errorStepKey.getName()); lifecycleState.setFailedStep(failedStepKey.getName()); ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); - IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, threadPool, () -> now); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> runner.moveClusterStateToFailedStep(clusterState, indices)); assertThat(exception.getMessage(), equalTo("index [" + indexName + "] is not associated with an Index Lifecycle Policy")); @@ -979,7 +993,7 @@ public void testMoveClusterStateToFailedNotOnError() { lifecycleState.setAction(failedStepKey.getAction()); lifecycleState.setStep(failedStepKey.getName()); ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); - IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, threadPool, () -> now); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> runner.moveClusterStateToFailedStep(clusterState, indices)); assertThat(exception.getMessage(), equalTo("cannot retry an action for an index [" + indices[0] @@ -1176,7 +1190,7 @@ public void testIsReadyToTransition() { stepMap, NamedXContentRegistry.EMPTY, null); ClusterService clusterService = mock(ClusterService.class); final AtomicLong now = new AtomicLong(5); - IndexLifecycleRunner runner = new IndexLifecycleRunner(policyStepsRegistry, clusterService, now::get); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyStepsRegistry, clusterService, threadPool, now::get); IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) .numberOfShards(randomIntBetween(1, 5)) .numberOfReplicas(randomIntBetween(0, 5)) @@ -1360,7 +1374,8 @@ public void setLatch(CountDownLatch latch) { } @Override - public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, + ClusterStateObserver observer, Listener listener) { executeCount++; if (latch != null) { latch.countDown(); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java index 13fe9c1c69002..810f913775e3b 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java @@ -25,8 +25,10 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; @@ -67,6 +69,7 @@ public class IndexLifecycleServiceTests extends ESTestCase { private DiscoveryNode masterNode; private IndicesAdminClient indicesClient; private long now; + private ThreadPool threadPool; @Before public void prepareServices() { @@ -96,7 +99,9 @@ public void prepareServices() { when(adminClient.indices()).thenReturn(indicesClient); when(client.settings()).thenReturn(Settings.EMPTY); - indexLifecycleService = new IndexLifecycleService(Settings.EMPTY, client, clusterService, clock, () -> now, null); + threadPool = new TestThreadPool("test"); + indexLifecycleService = new IndexLifecycleService(Settings.EMPTY, client, clusterService, threadPool, + clock, () -> now, null); Mockito.verify(clusterService).addListener(indexLifecycleService); Mockito.verify(clusterService).addStateApplier(indexLifecycleService); } @@ -104,6 +109,7 @@ public void prepareServices() { @After public void cleanup() { indexLifecycleService.close(); + threadPool.shutdownNow(); } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle index 188086afcf14a..95a2f8db2c49c 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle @@ -36,6 +36,7 @@ integTestCluster { dependsOn copyKeyCerts setting 'xpack.security.enabled', 'true' setting 'xpack.ml.enabled', 'true' + setting 'xpack.watcher.enabled', 'false' setting 'logger.org.elasticsearch.xpack.ml.datafeed', 'TRACE' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.security.authc.token.enabled', 'true' diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java index 1453f59ed43e4..454a3eb06e5a7 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java +++ b/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java @@ -13,8 +13,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.ml.MachineLearning; -import org.joda.time.DateTime; +import java.time.Clock; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; @@ -266,7 +268,7 @@ public void testHRDSplit() throws Exception { "\"time\": { \"type\": \"date\" } }"); // Index some data - DateTime baseTime = new DateTime().minusYears(1); + ZonedDateTime baseTime = ZonedDateTime.now(Clock.systemDefaultZone()).minusYears(1); TestConfiguration test = tests.get(randomInt(tests.size()-1)); // domainSplit() tests had subdomain, testHighestRegisteredDomainCases() did not, so we need a special case for sub @@ -276,18 +278,20 @@ public void testHRDSplit() throws Exception { for (int i = 0; i < 100; i++) { - DateTime time = baseTime.plusHours(i); + ZonedDateTime time = baseTime.plusHours(i); if (i == 64) { // Anomaly has 100 docs, but we don't care about the value for (int j = 0; j < 100; j++) { - Request createDocRequest = new Request("PUT", "/painless/_doc/" + time.toDateTimeISO() + "_" + j); - createDocRequest.setJsonEntity("{\"domain\": \"" + "bar.bar.com\", \"time\": \"" + time.toDateTimeISO() + "\"}"); + String formattedTime = time.format(DateTimeFormatter.ISO_DATE_TIME); + Request createDocRequest = new Request("PUT", "/painless/_doc/" + formattedTime + "_" + j); + createDocRequest.setJsonEntity("{\"domain\": \"" + "bar.bar.com\", \"time\": \"" + formattedTime + "\"}"); client().performRequest(createDocRequest); } } else { // Non-anomalous values will be what's seen when the anomaly is reported - Request createDocRequest = new Request("PUT", "/painless/_doc/" + time.toDateTimeISO()); - createDocRequest.setJsonEntity("{\"domain\": \"" + test.hostName + "\", \"time\": \"" + time.toDateTimeISO() + "\"}"); + String formattedTime = time.format(DateTimeFormatter.ISO_DATE_TIME); + Request createDocRequest = new Request("PUT", "/painless/_doc/" + formattedTime); + createDocRequest.setJsonEntity("{\"domain\": \"" + test.hostName + "\", \"time\": \"" + formattedTime + "\"}"); client().performRequest(createDocRequest); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java index 190933b1e9316..5b9852ba4fddc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java @@ -16,9 +16,9 @@ import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; -import org.joda.time.DateTime; -import org.joda.time.chrono.ISOChronology; +import java.time.Clock; +import java.time.ZonedDateTime; import java.util.Objects; import java.util.Random; import java.util.concurrent.ScheduledFuture; @@ -70,9 +70,14 @@ public MlDailyMaintenanceService(ClusterName clusterName, ThreadPool threadPool, private static TimeValue delayToNextTime(ClusterName clusterName) { Random random = new Random(clusterName.hashCode()); int minutesOffset = random.ints(0, MAX_TIME_OFFSET_MINUTES).findFirst().getAsInt(); - DateTime now = DateTime.now(ISOChronology.getInstance()); - DateTime next = now.plusDays(1).withTimeAtStartOfDay().plusMinutes(30).plusMinutes(minutesOffset); - return TimeValue.timeValueMillis(next.getMillis() - now.getMillis()); + + ZonedDateTime now = ZonedDateTime.now(Clock.systemDefaultZone()); + ZonedDateTime next = now.plusDays(1) + .toLocalDate() + .atStartOfDay(now.getZone()) + .plusMinutes(30) + .plusMinutes(minutesOffset); + return TimeValue.timeValueMillis(next.toInstant().toEpochMilli() - now.toInstant().toEpochMilli()); } public void start() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 2da89c359e793..a5aed9b5b5957 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -7,14 +7,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -23,21 +19,14 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.AliasOrIndex; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.Index; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.AllocatedPersistentTask; @@ -45,7 +34,6 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.persistent.PersistentTasksService; -import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -69,9 +57,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; -import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.LinkedList; import java.util.List; @@ -405,54 +391,6 @@ private static boolean jobHasRules(Job job) { return job.getAnalysisConfig().getDetectors().stream().anyMatch(d -> d.getRules().isEmpty() == false); } - static String[] mappingRequiresUpdate(ClusterState state, String[] concreteIndices, Version minVersion, - Logger logger) throws IOException { - List indicesToUpdate = new ArrayList<>(); - - ImmutableOpenMap> currentMapping = state.metaData().findMappings(concreteIndices, - new String[] { ElasticsearchMappings.DOC_TYPE }, MapperPlugin.NOOP_FIELD_FILTER); - - for (String index : concreteIndices) { - ImmutableOpenMap innerMap = currentMapping.get(index); - if (innerMap != null) { - MappingMetaData metaData = innerMap.get(ElasticsearchMappings.DOC_TYPE); - try { - Map meta = (Map) metaData.sourceAsMap().get("_meta"); - if (meta != null) { - String versionString = (String) meta.get("version"); - if (versionString == null) { - logger.info("Version of mappings for [{}] not found, recreating", index); - indicesToUpdate.add(index); - continue; - } - - Version mappingVersion = Version.fromString(versionString); - - if (mappingVersion.onOrAfter(minVersion)) { - continue; - } else { - logger.info("Mappings for [{}] are outdated [{}], updating it[{}].", index, mappingVersion, Version.CURRENT); - indicesToUpdate.add(index); - continue; - } - } else { - logger.info("Version of mappings for [{}] not found, recreating", index); - indicesToUpdate.add(index); - continue; - } - } catch (Exception e) { - logger.error(new ParameterizedMessage("Failed to retrieve mapping version for [{}], recreating", index), e); - indicesToUpdate.add(index); - continue; - } - } else { - logger.info("No mappings found for [{}], recreating", index); - indicesToUpdate.add(index); - } - } - return indicesToUpdate.toArray(new String[indicesToUpdate.size()]); - } - @Override protected String executor() { // This api doesn't do heavy or blocking operations (just delegates PersistentTasksService), @@ -527,25 +465,18 @@ public void onFailure(Exception e) { ); // Try adding state doc mapping - ActionListener resultsPutMappingHandler = ActionListener.wrap( + ActionListener getJobHandler = ActionListener.wrap( response -> { - addDocMappingIfMissing(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), ElasticsearchMappings::stateMapping, - state, jobUpdateListener); + ElasticsearchMappings.addDocMappingIfMissing(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), + ElasticsearchMappings::stateMapping, client, state, jobUpdateListener); }, listener::onFailure ); // Get the job config jobConfigProvider.getJob(jobParams.getJobId(), ActionListener.wrap( builder -> { - try { - jobParams.setJob(builder.build()); - - // Try adding results doc mapping - addDocMappingIfMissing(AnomalyDetectorsIndex.jobResultsAliasedName(jobParams.getJobId()), - ElasticsearchMappings::resultsMapping, state, resultsPutMappingHandler); - } catch (Exception e) { - listener.onFailure(e); - } + jobParams.setJob(builder.build()); + getJobHandler.onResponse(null); }, listener::onFailure )); @@ -620,48 +551,6 @@ public void onFailure(Exception e) { ); } - private void addDocMappingIfMissing(String alias, CheckedSupplier mappingSupplier, ClusterState state, - ActionListener listener) { - AliasOrIndex aliasOrIndex = state.metaData().getAliasAndIndexLookup().get(alias); - if (aliasOrIndex == null) { - // The index has never been created yet - listener.onResponse(true); - return; - } - String[] concreteIndices = aliasOrIndex.getIndices().stream().map(IndexMetaData::getIndex).map(Index::getName) - .toArray(String[]::new); - - String[] indicesThatRequireAnUpdate; - try { - indicesThatRequireAnUpdate = mappingRequiresUpdate(state, concreteIndices, Version.CURRENT, logger); - } catch (IOException e) { - listener.onFailure(e); - return; - } - - if (indicesThatRequireAnUpdate.length > 0) { - try (XContentBuilder mapping = mappingSupplier.get()) { - PutMappingRequest putMappingRequest = new PutMappingRequest(indicesThatRequireAnUpdate); - putMappingRequest.type(ElasticsearchMappings.DOC_TYPE); - putMappingRequest.source(mapping); - executeAsyncWithOrigin(client, ML_ORIGIN, PutMappingAction.INSTANCE, putMappingRequest, - ActionListener.wrap(response -> { - if (response.isAcknowledged()) { - listener.onResponse(true); - } else { - listener.onFailure(new ElasticsearchException("Attempt to put missing mapping in indices " - + Arrays.toString(indicesThatRequireAnUpdate) + " was not acknowledged")); - } - }, listener::onFailure)); - } catch (IOException e) { - listener.onFailure(e); - } - } else { - logger.trace("Mappings are uptodate."); - listener.onResponse(true); - } - } - public static class OpenJobPersistentTasksExecutor extends PersistentTasksExecutor { private static final Logger logger = LogManager.getLogger(OpenJobPersistentTasksExecutor.class); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index 35878f1199586..85f2489e6b0e5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -128,7 +128,6 @@ Long runLookBack(long startTime, Long endTime) throws Exception { auditor.info(jobId, msg); LOGGER.info("[{}] {}", jobId, msg); - FlushJobAction.Request request = new FlushJobAction.Request(jobId); request.setCalcInterim(true); run(lookbackStartTimeMs, lookbackEnd, request); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java index 86fe439ac16cb..f8fa3b1874808 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java @@ -17,11 +17,11 @@ import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; import org.elasticsearch.xpack.core.ml.action.util.PageParams; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; -import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory.BucketWithMissingData; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.utils.Intervals; -import org.joda.time.DateTime; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory.BucketWithMissingData; +import java.time.ZonedDateTime; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -131,8 +131,8 @@ private Map checkCurrentBucketEventCount(long start, long end) { } private static long toHistogramKeyToEpoch(Object key) { - if (key instanceof DateTime) { - return ((DateTime)key).getMillis(); + if (key instanceof ZonedDateTime) { + return ((ZonedDateTime)key).toInstant().toEpochMilli(); } else if (key instanceof Double) { return ((Double)key).longValue(); } else if (key instanceof Long){ diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java index db8dea22675f2..8cf3ed39651d6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java @@ -21,10 +21,10 @@ import org.elasticsearch.search.aggregations.metrics.Percentiles; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.joda.time.DateTime; import java.io.IOException; import java.io.OutputStream; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -210,15 +210,15 @@ private void processDateHistogram(Histogram agg) throws IOException { } /* - * Date Histograms have a {@link DateTime} object as the key, + * Date Histograms have a {@link ZonedDateTime} object as the key, * Histograms have either a Double or Long. */ private long toHistogramKeyToEpoch(Object key) { - if (key instanceof DateTime) { - return ((DateTime)key).getMillis(); + if (key instanceof ZonedDateTime) { + return ((ZonedDateTime)key).toInstant().toEpochMilli(); } else if (key instanceof Double) { return ((Double)key).longValue(); - } else if (key instanceof Long){ + } else if (key instanceof Long) { return (Long)key; } else { throw new IllegalStateException("Histogram key [" + key + "] cannot be converted to a timestamp"); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java index 232cd53a359ce..4223bff49825e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; -import org.joda.time.base.BaseDateTime; import java.util.List; import java.util.Map; @@ -112,8 +111,6 @@ public Object[] value(SearchHit hit) { } if (value[0] instanceof String) { // doc_value field with the epoch_millis format value[0] = Long.parseLong((String) value[0]); - } else if (value[0] instanceof BaseDateTime) { // script field - value[0] = ((BaseDateTime) value[0]).getMillis(); } else if (value[0] instanceof Long == false) { // pre-6.0 field throw new IllegalStateException("Unexpected value for a time field: " + value[0].getClass()); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/overallbuckets/OverallBucketsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/overallbuckets/OverallBucketsProvider.java index 204ae42720433..dd9a6229ec887 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/overallbuckets/OverallBucketsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/overallbuckets/OverallBucketsProvider.java @@ -14,8 +14,8 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.results.OverallBucket; import org.elasticsearch.xpack.core.ml.job.results.Result; -import org.joda.time.DateTime; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Date; import java.util.List; @@ -64,8 +64,8 @@ public List computeOverallBuckets(Histogram histogram) { } private static Date getHistogramBucketTimestamp(Histogram.Bucket bucket) { - DateTime bucketTimestamp = (DateTime) bucket.getKey(); - return new Date(bucketTimestamp.getMillis()); + ZonedDateTime bucketTimestamp = (ZonedDateTime) bucket.getKey(); + return new Date(bucketTimestamp.toInstant().toEpochMilli()); } static class TopNScores extends PriorityQueue { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 9695d73ed05c5..dd3656ee04b67 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -40,6 +40,7 @@ import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.job.process.autodetect.output.FlushAcknowledgement; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; @@ -417,7 +418,9 @@ public void onFailure(Exception e) { public void openJob(JobTask jobTask, ClusterState clusterState, Consumer closeHandler) { String jobId = jobTask.getJobId(); logger.info("Opening job [{}]", jobId); - AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterState, ActionListener.wrap( + + // Start the process + ActionListener stateAliasHandler = ActionListener.wrap( r -> { jobManager.getJob(jobId, ActionListener.wrap( job -> { @@ -427,7 +430,6 @@ public void openJob(JobTask jobTask, ClusterState clusterState, Consumer { // We need to fork, otherwise we restore model state from a network thread (several GET api calls): @@ -477,7 +479,17 @@ protected void doRun() { closeHandler )); }, - closeHandler)); + closeHandler); + + // Make sure the state index and alias exist + ActionListener resultsMappingUpdateHandler = ActionListener.wrap( + ack -> AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterState, stateAliasHandler), + closeHandler + ); + + // Try adding the results doc mapping - this updates to the latest version if an old mapping is present + ElasticsearchMappings.addDocMappingIfMissing(AnomalyDetectorsIndex.jobResultsAliasedName(jobId), + ElasticsearchMappings::resultsMapping, client, clusterState, resultsMappingUpdateHandler); } private void createProcessAndSetRunning(ProcessContext processContext, Job job, AutodetectParams params, Consumer handler) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java index b595c564ab9aa..be50114fc46e0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java @@ -15,9 +15,9 @@ import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.ml.job.persistence.BatchedJobsIterator; import org.elasticsearch.xpack.ml.utils.VolatileCursorIterator; -import org.joda.time.DateTime; -import org.joda.time.chrono.ISOChronology; +import java.time.Clock; +import java.time.Instant; import java.util.Deque; import java.util.Iterator; import java.util.List; @@ -71,7 +71,7 @@ private WrappedBatchedJobsIterator newJobIterator() { } private long calcCutoffEpochMs(long retentionDays) { - long nowEpochMs = DateTime.now(ISOChronology.getInstance()).getMillis(); + long nowEpochMs = Instant.now(Clock.systemDefaultZone()).toEpochMilli(); return nowEpochMs - new TimeValue(retentionDays, TimeUnit.DAYS).getMillis(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java index 3de9795deb335..3225a7eb9212e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java @@ -35,11 +35,11 @@ import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.ml.MachineLearning; -import org.joda.time.DateTime; -import org.joda.time.chrono.ISOChronology; import java.io.IOException; import java.io.InputStream; +import java.time.Clock; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -66,7 +66,7 @@ public class ExpiredForecastsRemover implements MlDataRemover { public ExpiredForecastsRemover(Client client, ThreadPool threadPool) { this.client = Objects.requireNonNull(client); this.threadPool = Objects.requireNonNull(threadPool); - this.cutoffEpochMs = DateTime.now(ISOChronology.getInstance()).getMillis(); + this.cutoffEpochMs = Instant.now(Clock.systemDefaultZone()).toEpochMilli(); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 9bd32bdc9eff3..da54b33d27597 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -38,7 +37,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -53,7 +51,6 @@ import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -61,7 +58,6 @@ import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; import org.junit.Before; -import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; import java.util.Arrays; @@ -486,59 +482,6 @@ public void testVerifyIndicesPrimaryShardsAreActive() { assertEquals(indexToRemove, result.get(0)); } - public void testMappingRequiresUpdateNoMapping() throws IOException { - ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); - ClusterState cs = csBuilder.build(); - String[] indices = new String[] { "no_index" }; - - assertArrayEquals(new String[] { "no_index" }, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateNullMapping() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("null_mapping", null)); - String[] indices = new String[] { "null_index" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateNoVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("no_version_field", "NO_VERSION_FIELD")); - String[] indices = new String[] { "no_version_field" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateRecentMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_current", Version.CURRENT.toString())); - String[] indices = new String[] { "version_current" }; - assertArrayEquals(new String[] {}, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateMaliciousMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData( - Collections.singletonMap("version_current", Collections.singletonMap("nested", "1.0"))); - String[] indices = new String[] { "version_nested" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateBogusMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_bogus", "0.0")); - String[] indices = new String[] { "version_bogus" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateNewerMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_newer", Version.CURRENT)); - String[] indices = new String[] { "version_newer" }; - assertArrayEquals(new String[] {}, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousVersion(), - logger)); - } - - public void testMappingRequiresUpdateNewerMappingVersionMinor() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_newer_minor", Version.CURRENT)); - String[] indices = new String[] { "version_newer_minor" }; - assertArrayEquals(new String[] {}, - TransportOpenJobAction.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousMinorVersion(), logger)); - } - public void testNodeNameAndVersion() { TransportAddress ta = new TransportAddress(InetAddress.getLoopbackAddress(), 9300); Map attributes = new HashMap<>(); @@ -641,42 +584,6 @@ private void addIndices(MetaData.Builder metaData, RoutingTable.Builder routingT } } - private ClusterState getClusterStateWithMappingsWithMetaData(Map namesAndVersions) throws IOException { - MetaData.Builder metaDataBuilder = MetaData.builder(); - - for (Map.Entry entry : namesAndVersions.entrySet()) { - - String indexName = entry.getKey(); - Object version = entry.getValue(); - - IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName); - indexMetaData.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); - - Map mapping = new HashMap<>(); - Map properties = new HashMap<>(); - for (int i = 0; i < 10; i++) { - properties.put("field" + i, Collections.singletonMap("type", "string")); - } - mapping.put("properties", properties); - - Map meta = new HashMap<>(); - if (version != null && version.equals("NO_VERSION_FIELD") == false) { - meta.put("version", version); - } - mapping.put("_meta", meta); - - indexMetaData.putMapping(new MappingMetaData(ElasticsearchMappings.DOC_TYPE, mapping)); - - metaDataBuilder.put(indexMetaData); - } - MetaData metaData = metaDataBuilder.build(); - - ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); - csBuilder.metaData(metaData); - return csBuilder.build(); - } - private static Job jobWithRules(String jobId) { DetectionRule rule = new DetectionRule.Builder(Collections.singletonList( new RuleCondition(RuleCondition.AppliesTo.TYPICAL, Operator.LT, 100.0) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java index 1e5e6fa652db1..ad999daafb254 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java @@ -8,9 +8,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField; import org.elasticsearch.xpack.ml.test.SearchHitBuilder; -import org.joda.time.DateTime; import java.util.Arrays; @@ -98,16 +96,16 @@ public void testNewTimeFieldGivenSource() { expectThrows(IllegalArgumentException.class, () -> ExtractedField.newTimeField("time", ExtractedField.ExtractionMethod.SOURCE)); } - public void testValueGivenTimeField() { + public void testValueGivenStringTimeField() { final long millis = randomLong(); - final SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", new DateTime(millis)).build(); + final SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", Long.toString(millis)).build(); final ExtractedField timeField = ExtractedField.newTimeField("time", ExtractedField.ExtractionMethod.DOC_VALUE); assertThat(timeField.value(hit), equalTo(new Object[] { millis })); } - public void testValueGivenStringTimeField() { + public void testValueGivenLongTimeField() { final long millis = randomLong(); - final SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", Long.toString(millis)).build(); + final SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", millis).build(); final ExtractedField timeField = ExtractedField.newTimeField("time", ExtractedField.ExtractionMethod.DOC_VALUE); assertThat(timeField.value(hit), equalTo(new Object[] { millis })); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java index 1fd6db3de566a..20dd49029b3ea 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.ml.test.SearchHitBuilder; -import org.joda.time.DateTime; import java.util.Arrays; import java.util.Collections; @@ -64,13 +63,6 @@ public void testAllTypesOfFields() { assertThat(extractedFields.getSourceFields(), equalTo(new String[] {"src1", "src2"})); } - public void testTimeFieldValue() { - long millis = randomLong(); - SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", new DateTime(millis)).build(); - TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, Collections.singletonList(timeField)); - assertThat(extractedFields.timeFieldValue(hit), equalTo(millis)); - } - public void testStringTimeFieldValue() { long millis = randomLong(); SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", Long.toString(millis)).build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java index 6e256680eca55..f6f75fe722dac 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.Arrays; @@ -316,7 +316,7 @@ private void validateJavaTimestampFormats(List javaTimestampFormats, Str String timestampFormat = javaTimestampFormats.get(i); switch (timestampFormat) { case "ISO8601": - parsed = DateFormatters.forPattern("strict_date_optional_time_nanos").withZone(defaultZone).parse(text); + parsed = DateFormatter.forPattern("strict_date_optional_time_nanos").withZone(defaultZone).parse(text); break; default: java.time.format.DateTimeFormatter parser = new java.time.format.DateTimeFormatterBuilder() diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java index 505a2b871da0b..ee331a99006ed 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java @@ -289,11 +289,16 @@ private Bucket createBucket(boolean isInterim) { return bucket; } + private Date randomDate() { + // between 1970 and 2065 + return new Date(randomLongBetween(0, 3000000000000L)); + } + private List createRecords(boolean isInterim) { List records = new ArrayList<>(); int count = randomIntBetween(0, 100); - Date now = new Date(randomNonNegativeLong()); + Date now = randomDate(); for (int i=0; i { @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/AutodetectResultTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/AutodetectResultTests.java index bf9a6f0a99f99..8711ef9b35a4a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/AutodetectResultTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/AutodetectResultTests.java @@ -11,6 +11,9 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.output.FlushAcknowledgement; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotTests; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.QuantilesTests; import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; @@ -18,12 +21,8 @@ import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.ModelPlot; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotTests; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.QuantilesTests; import java.util.ArrayList; -import java.util.Date; import java.util.List; public class AutodetectResultTests extends AbstractSerializingTestCase { @@ -48,7 +47,7 @@ protected AutodetectResult createTestInstance() { FlushAcknowledgement flushAcknowledgement; String jobId = "foo"; if (randomBoolean()) { - bucket = new Bucket(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong()); + bucket = new Bucket(jobId, randomDate(), randomNonNegativeLong()); } else { bucket = null; } @@ -56,7 +55,7 @@ protected AutodetectResult createTestInstance() { int size = randomInt(10); records = new ArrayList<>(size); for (int i = 0; i < size; i++) { - AnomalyRecord record = new AnomalyRecord(jobId, new Date(randomLong()), randomNonNegativeLong()); + AnomalyRecord record = new AnomalyRecord(jobId, randomDate(), randomNonNegativeLong()); record.setProbability(randomDoubleBetween(0.0, 1.0, true)); records.add(record); } @@ -67,7 +66,7 @@ protected AutodetectResult createTestInstance() { influencers = new ArrayList<>(size); for (int i = 0; i < size; i++) { Influencer influencer = new Influencer(jobId, randomAlphaOfLength(10), randomAlphaOfLength(10), - new Date(randomNonNegativeLong()), randomNonNegativeLong()); + randomDate(), randomNonNegativeLong()); influencer.setProbability(randomDoubleBetween(0.0, 1.0, true)); influencers.add(influencer); } @@ -89,12 +88,13 @@ protected AutodetectResult createTestInstance() { modelSizeStats = null; } if (randomBoolean()) { - modelPlot = new ModelPlot(jobId, new Date(randomLong()), randomNonNegativeLong(), randomInt()); + modelPlot = new ModelPlot(jobId, randomDate(), randomNonNegativeLong(), randomInt()); } else { modelPlot = null; } if (randomBoolean()) { - forecast = new Forecast(jobId, randomAlphaOfLength(20), new Date(randomLong()), randomNonNegativeLong(), randomInt()); + forecast = new Forecast(jobId, randomAlphaOfLength(20), randomDate(), + randomNonNegativeLong(), randomInt()); } else { forecast = null; } @@ -110,7 +110,8 @@ protected AutodetectResult createTestInstance() { categoryDefinition = null; } if (randomBoolean()) { - flushAcknowledgement = new FlushAcknowledgement(randomAlphaOfLengthBetween(1, 20), new Date(randomNonNegativeLong())); + flushAcknowledgement = new FlushAcknowledgement(randomAlphaOfLengthBetween(1, 20), + randomDate()); } else { flushAcknowledgement = null; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java index 65343b0a068ac..a49ef0a5e26fa 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java @@ -33,7 +33,7 @@ public Bucket createTestInstance() { } public Bucket createTestInstance(String jobId) { - Bucket bucket = new Bucket(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong()); + Bucket bucket = new Bucket(jobId, randomDate(), randomNonNegativeLong()); if (randomBoolean()) { bucket.setAnomalyScore(randomDouble()); } @@ -92,7 +92,7 @@ protected Bucket doParseInstance(XContentParser parser) { } public void testEquals_GivenDifferentClass() { - Bucket bucket = new Bucket("foo", new Date(randomLong()), randomNonNegativeLong()); + Bucket bucket = new Bucket("foo", randomDate(), randomNonNegativeLong()); assertFalse(bucket.equals("a string")); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ForecastTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ForecastTests.java index b1d9f37dcb4f2..a5c15716ea293 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ForecastTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ForecastTests.java @@ -26,7 +26,7 @@ protected Forecast createTestInstance() { public Forecast createTestInstance(String jobId) { Forecast forecast = - new Forecast(jobId, randomAlphaOfLength(20), new Date(randomLong()), + new Forecast(jobId, randomAlphaOfLength(20), randomDate(), randomNonNegativeLong(), randomInt()); if (randomBoolean()) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ModelPlotTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ModelPlotTests.java index 2a5ceb8363b8a..37788bfa203d2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ModelPlotTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ModelPlotTests.java @@ -30,7 +30,7 @@ protected ModelPlot createTestInstance() { public ModelPlot createTestInstance(String jobId) { ModelPlot modelPlot = - new ModelPlot(jobId, new Date(randomLong()), randomNonNegativeLong(), randomInt()); + new ModelPlot(jobId, randomDate(), randomNonNegativeLong(), randomInt()); if (randomBoolean()) { modelPlot.setByFieldName(randomAlphaOfLengthBetween(1, 20)); } @@ -73,14 +73,16 @@ protected ModelPlot doParseInstance(XContentParser parser) { public void testEquals_GivenSameObject() { ModelPlot modelPlot = - new ModelPlot(randomAlphaOfLength(15), new Date(randomLong()), randomNonNegativeLong(), randomInt()); + new ModelPlot(randomAlphaOfLength(15), + randomDate(), randomNonNegativeLong(), randomInt()); assertTrue(modelPlot.equals(modelPlot)); } public void testEquals_GivenObjectOfDifferentClass() { ModelPlot modelPlot = - new ModelPlot(randomAlphaOfLength(15), new Date(randomLong()), randomNonNegativeLong(), randomInt()); + new ModelPlot(randomAlphaOfLength(15), + randomDate(), randomNonNegativeLong(), randomInt()); assertFalse(modelPlot.equals("a string")); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/OverallBucketTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/OverallBucketTests.java index 42b29cb5ee224..b6c0a99685d0b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/OverallBucketTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/OverallBucketTests.java @@ -26,7 +26,7 @@ protected OverallBucket createTestInstance() { for (int i = 0; i < jobCount; ++i) { jobs.add(new OverallBucket.JobInfo(JobTests.randomValidJobId(), randomDoubleBetween(0.0, 100.0, true))); } - return new OverallBucket(new Date(randomNonNegativeLong()), + return new OverallBucket(new Date(randomLongBetween(0, 3000000000000L)), randomIntBetween(60, 24 * 3600), randomDoubleBetween(0.0, 100.0, true), jobs, @@ -47,4 +47,4 @@ public void testCompareTo() { assertThat(jobInfo1.compareTo(jobInfo3), lessThan(0)); assertThat(jobInfo2.compareTo(jobInfo3), lessThan(0)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java index 368758654cb9b..647835bf9311e 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java @@ -87,7 +87,8 @@ public static MonitoringBulkDoc randomMonitoringBulkDoc(final Random random, final MonitoredSystem system, final String type) throws IOException { final String id = random.nextBoolean() ? RandomStrings.randomAsciiLettersOfLength(random, 5) : null; - final long timestamp = RandomNumbers.randomLongBetween(random, 0L, Long.MAX_VALUE); + // ending date is the last second of 9999, should be sufficient + final long timestamp = RandomNumbers.randomLongBetween(random, 0L, 253402300799000L); final long interval = RandomNumbers.randomLongBetween(random, 0L, Long.MAX_VALUE); return new MonitoringBulkDoc(system, type, id, timestamp, interval, source, xContentType); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index c23ef3c8ee51c..6caefe148b28a 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -61,7 +61,7 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) public class LocalExporterIntegTests extends LocalExporterIntegTestCase { - private final String indexTimeFormat = randomFrom("YY", "YYYY", "YYYY.MM", "YYYY-MM", "MM.YYYY", "MM", null); + private final String indexTimeFormat = randomFrom("yy", "yyyy", "yyyy.MM", "yyyy-MM", "MM.yyyy", "MM", null); private void stopMonitoring() { // Now disabling the monitoring service, so that no more collection are started diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java index 232034177e87b..59141d2a83aeb 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.rollup; -import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -16,7 +16,6 @@ import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; -import org.joda.time.DateTimeZone; import java.util.ArrayList; import java.util.Comparator; @@ -98,7 +97,7 @@ private static void checkDateHisto(DateHistogramAggregationBuilder source, List< DateHistogramInterval interval = new DateHistogramInterval((String)agg.get(RollupField.INTERVAL)); String thisTimezone = (String)agg.get(DateHistogramGroupConfig.TIME_ZONE); - String sourceTimeZone = source.timeZone() == null ? DateTimeZone.UTC.toString() : source.timeZone().toString(); + String sourceTimeZone = source.timeZone() == null ? "UTC" : source.timeZone().toString(); // Ensure we are working on the same timezone if (thisTimezone.equalsIgnoreCase(sourceTimeZone) == false) { @@ -152,10 +151,10 @@ static boolean validateCalendarInterval(DateHistogramInterval requestInterval, // The request must be gte the config. The CALENDAR_ORDERING map values are integers representing // relative orders between the calendar units - DateTimeUnit requestUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(requestInterval.toString()); - long requestOrder = requestUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); - DateTimeUnit configUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(configInterval.toString()); - long configOrder = configUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); + Rounding.DateTimeUnit requestUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(requestInterval.toString()); + long requestOrder = requestUnit.getField().getBaseUnit().getDuration().toMillis(); + Rounding.DateTimeUnit configUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(configInterval.toString()); + long configOrder = configUnit.getField().getBaseUnit().getDuration().toMillis(); // All calendar units are multiples naturally, so we just care about gte return requestOrder >= configOrder; @@ -387,8 +386,8 @@ private static Comparator getComparator() { static long getMillisFixedOrCalendar(String value) { DateHistogramInterval interval = new DateHistogramInterval(value); if (isCalendarInterval(interval)) { - DateTimeUnit intervalUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()); - return intervalUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); + Rounding.DateTimeUnit intervalUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()); + return intervalUnit.getField().getBaseUnit().getDuration().toMillis(); } else { return TimeValue.parseTimeValue(value, "date_histo.comparator.interval").getMillis(); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index ee29e56a33169..1d5f9093a29df 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -28,9 +28,9 @@ import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.indexing.IterationResult; -import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; @@ -42,6 +42,7 @@ import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTimeZone; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -214,7 +215,7 @@ public static List> createValueSourceBuilders(fi final DateHistogramValuesSourceBuilder dateHistogramBuilder = new DateHistogramValuesSourceBuilder(dateHistogramName); dateHistogramBuilder.dateHistogramInterval(dateHistogram.getInterval()); dateHistogramBuilder.field(dateHistogramField); - dateHistogramBuilder.timeZone(toDateTimeZone(dateHistogram.getTimeZone())); + dateHistogramBuilder.timeZone(ZoneId.of(dateHistogram.getTimeZone())); return Collections.singletonList(dateHistogramBuilder); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java index 95161e0d149dc..d05a78e121296 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTimeZone; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -122,14 +123,14 @@ public void testIncompatibleFixedCalendarInterval() { } public void testBadTimeZone() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST")); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "CET")); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") .dateHistogramInterval(new DateHistogramInterval("1h")) - .timeZone(DateTimeZone.UTC); + .timeZone(ZoneOffset.UTC); RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps)); assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " + diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java index 3dc91ede1bd2c..0032b5a88a563 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java @@ -147,7 +147,7 @@ public void testRangeWrongTZ() { Set caps = new HashSet<>(); caps.add(cap); Exception e = expectThrows(IllegalArgumentException.class, - () -> TransportRollupSearchAction.rewriteQuery(new RangeQueryBuilder("foo").gt(1).timeZone("EST"), caps)); + () -> TransportRollupSearchAction.rewriteQuery(new RangeQueryBuilder("foo").gt(1).timeZone("CET"), caps)); assertThat(e.getMessage(), equalTo("Field [foo] in [range] query was found in rollup indices, but requested timezone is not " + "compatible. Options include: [UTC]")); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java index bd8a0b19f8250..9f8796f4c9589 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTimeZone; +import java.time.zone.ZoneRulesException; import java.util.HashMap; import java.util.Map; @@ -84,9 +85,9 @@ public void testDefaultTimeZone() { } public void testUnknownTimeZone() { - Exception e = expectThrows(IllegalArgumentException.class, + Exception e = expectThrows(ZoneRulesException.class, () -> new DateHistogramGroupConfig("foo", DateHistogramInterval.HOUR, null, "FOO")); - assertThat(e.getMessage(), equalTo("The datetime zone id 'FOO' is not recognised")); + assertThat(e.getMessage(), equalTo("Unknown time-zone ID: FOO")); } public void testEmptyHistoField() { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index b87b1f3761fdb..cdabb36d42760 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; @@ -58,12 +58,14 @@ import org.junit.Before; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; @@ -144,22 +146,22 @@ public void testDateHistoAndMetrics() throws Exception { final List> dataset = new ArrayList<>(); dataset.addAll( Arrays.asList( - asMap("the_histo", asLong("2015-03-31T03:00:00"), "counter", 10), - asMap("the_histo", asLong("2015-03-31T03:20:00"), "counter", 20), - asMap("the_histo", asLong("2015-03-31T03:40:00"), "counter", 20), - asMap("the_histo", asLong("2015-03-31T04:00:00"), "counter", 32), - asMap("the_histo", asLong("2015-03-31T04:20:00"), "counter", 54), - asMap("the_histo", asLong("2015-03-31T04:40:00"), "counter", 55), - asMap("the_histo", asLong("2015-03-31T05:00:00"), "counter", 55), - asMap("the_histo", asLong("2015-03-31T05:00:00"), "counter", 70), - asMap("the_histo", asLong("2015-03-31T05:20:00"), "counter", 70), - asMap("the_histo", asLong("2015-03-31T05:40:00"), "counter", 80), - asMap("the_histo", asLong("2015-03-31T06:00:00"), "counter", 80), - asMap("the_histo", asLong("2015-03-31T06:20:00"), "counter", 90), - asMap("the_histo", asLong("2015-03-31T06:40:00"), "counter", 100), - asMap("the_histo", asLong("2015-03-31T07:00:00"), "counter", 120), - asMap("the_histo", asLong("2015-03-31T07:20:00"), "counter", 120), - asMap("the_histo", asLong("2015-03-31T07:40:00"), "counter", 200) + asMap("the_histo", asLong("2015-03-31T03:00:00.000Z"), "counter", 10), + asMap("the_histo", asLong("2015-03-31T03:20:00.000Z"), "counter", 20), + asMap("the_histo", asLong("2015-03-31T03:40:00.000Z"), "counter", 20), + asMap("the_histo", asLong("2015-03-31T04:00:00.000Z"), "counter", 32), + asMap("the_histo", asLong("2015-03-31T04:20:00.000Z"), "counter", 54), + asMap("the_histo", asLong("2015-03-31T04:40:00.000Z"), "counter", 55), + asMap("the_histo", asLong("2015-03-31T05:00:00.000Z"), "counter", 55), + asMap("the_histo", asLong("2015-03-31T05:00:00.000Z"), "counter", 70), + asMap("the_histo", asLong("2015-03-31T05:20:00.000Z"), "counter", 70), + asMap("the_histo", asLong("2015-03-31T05:40:00.000Z"), "counter", 80), + asMap("the_histo", asLong("2015-03-31T06:00:00.000Z"), "counter", 80), + asMap("the_histo", asLong("2015-03-31T06:20:00.000Z"), "counter", 90), + asMap("the_histo", asLong("2015-03-31T06:40:00.000Z"), "counter", 100), + asMap("the_histo", asLong("2015-03-31T07:00:00.000Z"), "counter", 120), + asMap("the_histo", asLong("2015-03-31T07:20:00.000Z"), "counter", 120), + asMap("the_histo", asLong("2015-03-31T07:40:00.000Z"), "counter", 200) ) ); executeTestCase(dataset, job, System.currentTimeMillis(), (resp) -> { @@ -170,7 +172,7 @@ public void testDateHistoAndMetrics() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00.000Z"), "the_histo.date_histogram.interval", "1h", "the_histo.date_histogram._count", 3, "counter.avg._count", 3.0, @@ -188,7 +190,7 @@ public void testDateHistoAndMetrics() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T04:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T04:00:00.000Z"), "the_histo.date_histogram.interval", "1h", "the_histo.date_histogram._count", 3, "counter.avg._count", 3.0, @@ -206,7 +208,7 @@ public void testDateHistoAndMetrics() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T05:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T05:00:00.000Z"), "the_histo.date_histogram.interval", "1h", "the_histo.date_histogram._count", 4, "counter.avg._count", 4.0, @@ -224,7 +226,7 @@ public void testDateHistoAndMetrics() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T06:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T06:00:00.000Z"), "the_histo.date_histogram.interval", "1h", "the_histo.date_histogram._count", 3, "counter.avg._count", 3.0, @@ -242,7 +244,7 @@ public void testDateHistoAndMetrics() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T07:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T07:00:00.000Z"), "the_histo.date_histogram.interval", "1h", "the_histo.date_histogram._count", 3, "counter.avg._count", 3.0, @@ -326,7 +328,7 @@ public void testSimpleDateHistoWithDelay() throws Exception { public void testSimpleDateHistoWithTimeZone() throws Exception { final List> dataset = new ArrayList<>(); - long now = asLong("2015-04-01T10:00:00"); + long now = asLong("2015-04-01T10:00:00.000Z"); dataset.addAll( Arrays.asList( asMap("the_histo", now - TimeValue.timeValueHours(10).getMillis()), @@ -353,7 +355,7 @@ public void testSimpleDateHistoWithTimeZone() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00.000Z"), "the_histo.date_histogram.interval", "1d", "the_histo.date_histogram._count", 2, "the_histo.date_histogram.time_zone", timeZone.toString(), @@ -372,7 +374,7 @@ public void testSimpleDateHistoWithTimeZone() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00.000Z"), "the_histo.date_histogram.interval", "1d", "the_histo.date_histogram._count", 2, "the_histo.date_histogram.time_zone", timeZone.toString(), @@ -385,7 +387,7 @@ public void testSimpleDateHistoWithTimeZone() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-04-01T03:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-04-01T03:00:00.000Z"), "the_histo.date_histogram.interval", "1d", "the_histo.date_histogram._count", 5, "the_histo.date_histogram.time_zone", timeZone.toString(), @@ -449,7 +451,7 @@ static Map asMap(Object... fields) { } private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis(); + return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(dateTime); } /** @@ -488,7 +490,8 @@ private void executeTestCase(List> docs, RollupJobConfig con private Map createFieldTypes(RollupJobConfig job) { Map fieldTypes = new HashMap<>(); MappedFieldType fieldType = new DateFieldMapper.Builder(job.getGroupConfig().getDateHistogram().getField()) - .dateTimeFormatter(DateFormatter.forPattern(randomFrom("basic_date", "date_optional_time", "epoch_second"))) + .format(randomFrom("basic_date", "date_optional_time", "epoch_second")) + .locale(Locale.ROOT) .build(new Mapper.BuilderContext(settings.getSettings(), new ContentPath(0))) .fieldType(); fieldTypes.put(fieldType.name(), fieldType); @@ -599,7 +602,7 @@ protected void doNextSearch(SearchRequest request, ActionListener securityActionFilter = new SetOnce<>(); private final SetOnce securityIndex = new SetOnce<>(); private final SetOnce indexAuditTrail = new SetOnce<>(); + private final SetOnce groupFactory = new SetOnce<>(); private final List bootstrapChecks; private final List securityExtensions = new ArrayList<>(); @@ -943,11 +945,12 @@ public Map> getTransports(Settings settings, ThreadP return Collections.emptyMap(); } + IPFilter ipFilter = this.ipFilter.get(); Map> transports = new HashMap<>(); transports.put(SecurityField.NAME4, () -> new SecurityNetty4ServerTransport(settings, Version.CURRENT, threadPool, - networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, ipFilter.get(), getSslService())); - transports.put(SecurityField.NIO, () -> new SecurityNioTransport(settings, Version.CURRENT, threadPool, - networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, ipFilter.get(), getSslService())); + networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, ipFilter, getSslService())); + transports.put(SecurityField.NIO, () -> new SecurityNioTransport(settings, Version.CURRENT, threadPool, networkService, + pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, ipFilter, getSslService(), getNioGroupFactory(settings))); return Collections.unmodifiableMap(transports); } @@ -967,7 +970,7 @@ public Map> getHttpTransports(Settings set httpTransports.put(SecurityField.NAME4, () -> new SecurityNetty4HttpServerTransport(settings, networkService, bigArrays, ipFilter.get(), getSslService(), threadPool, xContentRegistry, dispatcher)); httpTransports.put(SecurityField.NIO, () -> new SecurityNioHttpServerTransport(settings, networkService, bigArrays, - pageCacheRecycler, threadPool, xContentRegistry, dispatcher, ipFilter.get(), getSslService())); + pageCacheRecycler, threadPool, xContentRegistry, dispatcher, ipFilter.get(), getSslService(), getNioGroupFactory(settings))); return httpTransports; } @@ -1122,4 +1125,14 @@ public void accept(DiscoveryNode node, ClusterState state) { public void reloadSPI(ClassLoader loader) { securityExtensions.addAll(SecurityExtension.loadExtensions(loader)); } + + private synchronized NioGroupFactory getNioGroupFactory(Settings settings) { + if (groupFactory.get() != null) { + assert groupFactory.get().getSettings().equals(settings) : "Different settings than originally provided"; + return groupFactory.get(); + } else { + groupFactory.set(new NioGroupFactory(settings, logger)); + return groupFactory.get(); + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java index 9d4dfba327e5d..6007ef5fd6d03 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java @@ -7,6 +7,7 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; + import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.LoggingAwareMultiCommand; @@ -221,7 +222,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th Path file = FileUserPasswdStore.resolveFile(env); FileAttributesChecker attributesChecker = new FileAttributesChecker(file); - Map users = new HashMap<>(FileUserPasswdStore.parseFile(file, null, env.settings())); + Map users = FileUserPasswdStore.parseFile(file, null, env.settings()); if (users == null) { throw new UserException(ExitCodes.CONFIG, "Configuration file [" + file + "] is missing"); } @@ -229,6 +230,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist"); } final Hasher hasher = Hasher.resolve(XPackSettings.PASSWORD_HASHING_ALGORITHM.get(env.settings())); + users = new HashMap<>(users); // make modifiable users.put(username, hasher.hash(new SecureString(password))); FileUserPasswdStore.writeFile(users, file); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java index a2dab36be7e76..9e0da2518835d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java @@ -25,6 +25,7 @@ import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.transport.SecurityHttpExceptionHandler; @@ -54,8 +55,8 @@ public class SecurityNioHttpServerTransport extends NioHttpServerTransport { public SecurityNioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, PageCacheRecycler pageCacheRecycler, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, IPFilter ipFilter, - SSLService sslService) { - super(settings, networkService, bigArrays, pageCacheRecycler, threadPool, xContentRegistry, dispatcher); + SSLService sslService, NioGroupFactory nioGroupFactory) { + super(settings, networkService, bigArrays, pageCacheRecycler, threadPool, xContentRegistry, dispatcher, nioGroupFactory); this.securityExceptionHandler = new SecurityHttpExceptionHandler(logger, lifecycle, (c, e) -> super.onException(c, e)); this.ipFilter = ipFilter; this.nioIpFilter = new NioIPFilter(ipFilter, IPFilter.HTTP_PROFILE_NAME); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java index 7fdf946675ffd..a5b64c84c4306 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java @@ -29,6 +29,7 @@ import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TransportSettings; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.elasticsearch.transport.nio.NioTcpChannel; import org.elasticsearch.transport.nio.NioTcpServerChannel; import org.elasticsearch.transport.nio.NioTransport; @@ -76,8 +77,9 @@ public class SecurityNioTransport extends NioTransport { public SecurityNioTransport(Settings settings, Version version, ThreadPool threadPool, NetworkService networkService, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService, @Nullable final IPFilter authenticator, - SSLService sslService) { - super(settings, version, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService); + SSLService sslService, NioGroupFactory groupFactory) { + super(settings, version, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, + groupFactory); this.authenticator = authenticator; this.sslService = sslService; this.sslEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java index 597314b2a6196..19533542686de 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java @@ -5,15 +5,11 @@ */ package org.elasticsearch.integration; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.MappingMetaData; @@ -23,11 +19,9 @@ import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import java.util.Locale; import java.util.Map; import static java.util.Collections.singletonMap; -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -149,33 +143,6 @@ public void testGetIndex() throws Exception { assertThat(response.getIndices(), arrayContaining(index)); } - public void testCreateIndexDeleteInKibanaIndex() throws Exception { - final String index = randomBoolean()? ".kibana" : ".kibana-" + randomAlphaOfLengthBetween(1, 10).toLowerCase(Locale.ENGLISH); - - if (randomBoolean()) { - CreateIndexResponse createIndexResponse = client().filterWithHeader(singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) - .admin().indices().prepareCreate(index).get(); - assertThat(createIndexResponse.isAcknowledged(), is(true)); - } - - IndexResponse response = client() - .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) - .prepareIndex() - .setIndex(index) - .setType("dashboard") - .setSource("foo", "bar") - .setRefreshPolicy(IMMEDIATE) - .get(); - assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); - - DeleteResponse deleteResponse = client() - .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) - .prepareDelete(index, "dashboard", response.getId()) - .get(); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - } - public void testGetMappings() throws Exception { final String index = "logstash-20-12-2015"; final String type = "event"; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java index f2a6212107307..3dd749c86705a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java @@ -49,27 +49,30 @@ protected Settings nodeSettings() { String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100); Settings.Builder builder = Settings.builder() - .put(super.nodeSettings()) - .put("xpack.security.http.ssl.enabled", true) - .put("xpack.security.http.ssl.client_authentication", SSLClientAuth.OPTIONAL) - .put("xpack.security.http.ssl.keystore.path", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) - .put("xpack.security.http.ssl.keystore.password", "testnode") - .put("xpack.security.authc.realms.file.file.order", "0") - .put("xpack.security.authc.realms.pki.pki1.order", "1") - .put("xpack.security.authc.realms.pki.pki1.truststore.path", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) - .put("xpack.security.authc.realms.pki.pki1.files.role_mapping", getDataPath("role_mapping.yml")) - .put("transport.profiles.want_client_auth.port", randomClientPortRange) - .put("transport.profiles.want_client_auth.bind_host", "localhost") - .put("transport.profiles.want_client_auth.xpack.security.ssl.keystore.path", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) - .put("transport.profiles.want_client_auth.xpack.security.ssl.keystore.password", "testnode") - .put("transport.profiles.want_client_auth.xpack.security.ssl.client_authentication", SSLClientAuth.OPTIONAL); + .put(super.nodeSettings()) + .put("xpack.security.http.ssl.enabled", true) + .put("xpack.security.http.ssl.client_authentication", SSLClientAuth.OPTIONAL) + .put("xpack.security.http.ssl.key", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .put("xpack.security.http.ssl.certificate", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .put("xpack.security.authc.realms.file.file.order", "0") + .put("xpack.security.authc.realms.pki.pki1.order", "1") + .put("xpack.security.authc.realms.pki.pki1.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) + .put("xpack.security.authc.realms.pki.pki1.files.role_mapping", getDataPath("role_mapping.yml")) + .put("transport.profiles.want_client_auth.port", randomClientPortRange) + .put("transport.profiles.want_client_auth.bind_host", "localhost") + .put("transport.profiles.want_client_auth.xpack.security.ssl.key", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .put("transport.profiles.want_client_auth.xpack.security.ssl.certificate", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .put("transport.profiles.want_client_auth.xpack.security.ssl.client_authentication", SSLClientAuth.OPTIONAL); SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { secureSettings.setString("xpack.security.authc.realms.pki.pki1.truststore.secure_password", "truststore-testnode-only"); - secureSettings.setString("xpack.security.http.ssl.keystore.secure_password", "testnode"); + secureSettings.setString("xpack.security.http.ssl.secure_key_passphrase", "testnode"); + secureSettings.setString("transport.profiles.want_client_auth.xpack.security.ssl.secure_key_passphrase", "testnode"); }); return builder.build(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 6801ffd6bdf84..2d78a8c946772 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -224,10 +224,17 @@ public void testNegativeLookupsAreCached() { return null; }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class)); final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); + final NativePrivilegeStore nativePrivilegeStore = mock(NativePrivilegeStore.class); + doAnswer((invocationOnMock) -> { + ActionListener> callback = null; + callback = (ActionListener>) invocationOnMock.getArguments()[2]; + callback.onResponse(Collections.emptyList()); + return null; + }).when(nativePrivilegeStore).getPrivileges(isA(Set.class), isA(Set.class), any(ActionListener.class)); final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, - mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), + nativePrivilegeStore, Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), new XPackLicenseState(SECURITY_ENABLED_SETTINGS)); verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor @@ -257,8 +264,9 @@ public void testNegativeLookupsAreCached() { if (getSuperuserRole && numberOfTimesToCall > 0) { // the superuser role was requested so we get the role descriptors again verify(reservedRolesStore, times(2)).accept(anySetOf(String.class), any(ActionListener.class)); + verify(nativePrivilegeStore).getPrivileges(isA(Set.class),isA(Set.class), any(ActionListener.class)); } - verifyNoMoreInteractions(fileRolesStore, reservedRolesStore, nativeRolesStore); + verifyNoMoreInteractions(fileRolesStore, reservedRolesStore, nativeRolesStore, nativePrivilegeStore); } public void testNegativeLookupsCacheDisabled() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransportTests.java index 2f26456112a67..76048590cea99 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransportTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.nio.NioSelector; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ssl.SSLClientAuth; import org.elasticsearch.xpack.core.ssl.SSLService; @@ -46,6 +47,7 @@ public class SecurityNioHttpServerTransportTests extends ESTestCase { private SSLService sslService; private Environment env; private InetSocketAddress address = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); + private NioGroupFactory nioGroupFactory; @Before public void createSSLService() { @@ -67,10 +69,11 @@ public void testDefaultClientAuth() throws IOException { Settings settings = Settings.builder() .put(env.settings()) .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true).build(); + nioGroupFactory = new NioGroupFactory(settings, logger); sslService = new SSLService(settings, env); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); SecurityNioHttpServerTransport.SecurityHttpChannelFactory factory = transport.channelFactory(); SocketChannel socketChannel = mock(SocketChannel.class); when(socketChannel.getRemoteAddress()).thenReturn(address); @@ -88,9 +91,10 @@ public void testOptionalClientAuth() throws IOException { .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) .put("xpack.security.http.ssl.client_authentication", value).build(); sslService = new SSLService(settings, env); + nioGroupFactory = new NioGroupFactory(settings, logger); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); SecurityNioHttpServerTransport.SecurityHttpChannelFactory factory = transport.channelFactory(); SocketChannel socketChannel = mock(SocketChannel.class); @@ -107,10 +111,11 @@ public void testRequiredClientAuth() throws IOException { .put(env.settings()) .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) .put("xpack.security.http.ssl.client_authentication", value).build(); + nioGroupFactory = new NioGroupFactory(settings, logger); sslService = new SSLService(settings, env); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); SecurityNioHttpServerTransport.SecurityHttpChannelFactory factory = transport.channelFactory(); SocketChannel socketChannel = mock(SocketChannel.class); @@ -128,9 +133,10 @@ public void testNoClientAuth() throws IOException { .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) .put("xpack.security.http.ssl.client_authentication", value).build(); sslService = new SSLService(settings, env); + nioGroupFactory = new NioGroupFactory(settings, logger); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); SecurityNioHttpServerTransport.SecurityHttpChannelFactory factory = transport.channelFactory(); SocketChannel socketChannel = mock(SocketChannel.class); @@ -146,9 +152,10 @@ public void testCustomSSLConfiguration() throws IOException { .put(env.settings()) .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true).build(); sslService = new SSLService(settings, env); + nioGroupFactory = new NioGroupFactory(settings, logger); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); SecurityNioHttpServerTransport.SecurityHttpChannelFactory factory = transport.channelFactory(); SocketChannel socketChannel = mock(SocketChannel.class); when(socketChannel.getRemoteAddress()).thenReturn(address); @@ -161,9 +168,10 @@ public void testCustomSSLConfiguration() throws IOException { .put("xpack.security.http.ssl.supported_protocols", "TLSv1.2") .build(); sslService = new SSLService(settings, TestEnvironment.newEnvironment(settings)); + nioGroupFactory = new NioGroupFactory(settings, logger); transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); factory = transport.channelFactory(); channel = factory.createChannel(mock(NioSelector.class), socketChannel); SSLEngine customEngine = SSLEngineUtils.getSSLEngine(channel); @@ -183,11 +191,12 @@ public void testThatExceptionIsThrownWhenConfiguredWithoutSslKey() { .build(); env = TestEnvironment.newEnvironment(settings); sslService = new SSLService(settings, env); + nioGroupFactory = new NioGroupFactory(settings, logger); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService)); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory)); assertThat(e.getMessage(), containsString("key must be provided")); } @@ -202,8 +211,9 @@ public void testNoExceptionWhenConfiguredWithoutSslKeySSLDisabled() { .build(); env = TestEnvironment.newEnvironment(settings); sslService = new SSLService(settings, env); + nioGroupFactory = new NioGroupFactory(settings, logger); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java index 84d68ffd63e63..a30e1329432db 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportSettings; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.elasticsearch.xpack.security.transport.AbstractSimpleSecurityTransportTestCase; import java.util.Collections; @@ -34,7 +35,8 @@ public MockTransportService nioFromThreadPool(Settings settings, ThreadPool thre .put(settings) .put("xpack.security.transport.ssl.enabled", true).build(); Transport transport = new SecurityNioTransport(settings1, version, threadPool, networkService, new MockPageCacheRecycler(settings), - namedWriteableRegistry, new NoneCircuitBreakerService(), null, createSSLService(settings1)) { + namedWriteableRegistry, new NoneCircuitBreakerService(), null, createSSLService(settings1), + new NioGroupFactory(settings, logger)) { @Override public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionProfile profile, diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java index f2a9f9d15343a..73713f91231d6 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java @@ -68,7 +68,7 @@ Tuple>> nextPage(String cursor, RequestMeta meta) thro } boolean queryClose(String cursor) throws SQLException { - return httpClient.queryClose(cursor); + return httpClient.queryClose(cursor, Mode.JDBC); } InfoResponse serverInfo() throws SQLException { diff --git a/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java index 16ed3053ab46f..e7263ba1fc1ce 100644 --- a/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java @@ -28,6 +28,7 @@ import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; /** * Tests specific to multiple nodes. @@ -111,7 +112,7 @@ private void assertCount(RestClient client, int count) throws IOException { expected.put("columns", singletonList(columnInfo(mode, "COUNT(*)", "long", JDBCType.BIGINT, 20))); expected.put("rows", singletonList(singletonList(count))); - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); request.setJsonEntity("{\"query\": \"SELECT COUNT(*) FROM test\"" + mode(mode) + "}"); Map actual = responseToMap(client.performRequest(request)); diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java index 86772a49372ba..d5e7e7cc5084c 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java @@ -32,6 +32,7 @@ import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -180,7 +181,7 @@ private static Map runSql(@Nullable String asUser, String mode, } private static Map runSql(@Nullable String asUser, HttpEntity entity) throws IOException { - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); if (asUser != null) { RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("es-security-runas-user", asUser); diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java index 66492bab8f56f..0b71fe1c88cb1 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java @@ -37,6 +37,7 @@ import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; public class UserFunctionIT extends ESRestTestCase { @@ -178,7 +179,7 @@ private Map runSql(String asUser, String mode, String sql) throw } private Map runSql(String asUser, HttpEntity entity) throws IOException { - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); if (asUser != null) { RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("es-security-runas-user", asUser); diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java index 868c9584a0057..e346bfc649b23 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java @@ -28,8 +28,10 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.proto.RequestInfo.CLIENT_IDS; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; +import static org.elasticsearch.xpack.sql.proto.Mode.CLI; public abstract class SqlProtocolTestCase extends ESRestTestCase { @@ -79,62 +81,76 @@ public void testIPs() throws IOException { } public void testDateTimeIntervals() throws IOException { - assertQuery("SELECT INTERVAL '326' YEAR", "INTERVAL '326' YEAR", "interval_year", "P326Y", 7); - assertQuery("SELECT INTERVAL '50' MONTH", "INTERVAL '50' MONTH", "interval_month", "P50M", 7); - assertQuery("SELECT INTERVAL '520' DAY", "INTERVAL '520' DAY", "interval_day", "PT12480H", 23); - assertQuery("SELECT INTERVAL '163' HOUR", "INTERVAL '163' HOUR", "interval_hour", "PT163H", 23); - assertQuery("SELECT INTERVAL '163' MINUTE", "INTERVAL '163' MINUTE", "interval_minute", "PT2H43M", 23); - assertQuery("SELECT INTERVAL '223.16' SECOND", "INTERVAL '223.16' SECOND", "interval_second", "PT3M43.016S", 23); - assertQuery("SELECT INTERVAL '163-11' YEAR TO MONTH", "INTERVAL '163-11' YEAR TO MONTH", "interval_year_to_month", "P163Y11M", 7); - assertQuery("SELECT INTERVAL '163 12' DAY TO HOUR", "INTERVAL '163 12' DAY TO HOUR", "interval_day_to_hour", "PT3924H", 23); + assertQuery("SELECT INTERVAL '326' YEAR", "INTERVAL '326' YEAR", "interval_year", "P326Y", "+326-0", 7); + assertQuery("SELECT INTERVAL '50' MONTH", "INTERVAL '50' MONTH", "interval_month", "P50M", "+0-50", 7); + assertQuery("SELECT INTERVAL '520' DAY", "INTERVAL '520' DAY", "interval_day", "PT12480H", "+520 00:00:00.0", 23); + assertQuery("SELECT INTERVAL '163' HOUR", "INTERVAL '163' HOUR", "interval_hour", "PT163H", "+6 19:00:00.0", 23); + assertQuery("SELECT INTERVAL '163' MINUTE", "INTERVAL '163' MINUTE", "interval_minute", "PT2H43M", "+0 02:43:00.0", 23); + assertQuery("SELECT INTERVAL '223.16' SECOND", "INTERVAL '223.16' SECOND", "interval_second", "PT3M43.016S", "+0 00:03:43.16", 23); + assertQuery("SELECT INTERVAL '163-11' YEAR TO MONTH", "INTERVAL '163-11' YEAR TO MONTH", "interval_year_to_month", "P163Y11M", + "+163-11", 7); + assertQuery("SELECT INTERVAL '163 12' DAY TO HOUR", "INTERVAL '163 12' DAY TO HOUR", "interval_day_to_hour", "PT3924H", + "+163 12:00:00.0", 23); assertQuery("SELECT INTERVAL '163 12:39' DAY TO MINUTE", "INTERVAL '163 12:39' DAY TO MINUTE", "interval_day_to_minute", - "PT3924H39M", 23); + "PT3924H39M", "+163 12:39:00.0", 23); assertQuery("SELECT INTERVAL '163 12:39:59.163' DAY TO SECOND", "INTERVAL '163 12:39:59.163' DAY TO SECOND", - "interval_day_to_second", "PT3924H39M59.163S", 23); + "interval_day_to_second", "PT3924H39M59.163S", "+163 12:39:59.163", 23); assertQuery("SELECT INTERVAL -'163 23:39:56.23' DAY TO SECOND", "INTERVAL -'163 23:39:56.23' DAY TO SECOND", - "interval_day_to_second", "PT-3935H-39M-56.023S", 23); + "interval_day_to_second", "PT-3935H-39M-56.023S", "-163 23:39:56.23", 23); assertQuery("SELECT INTERVAL '163:39' HOUR TO MINUTE", "INTERVAL '163:39' HOUR TO MINUTE", "interval_hour_to_minute", - "PT163H39M", 23); + "PT163H39M", "+6 19:39:00.0", 23); assertQuery("SELECT INTERVAL '163:39:59.163' HOUR TO SECOND", "INTERVAL '163:39:59.163' HOUR TO SECOND", "interval_hour_to_second", - "PT163H39M59.163S", 23); + "PT163H39M59.163S", "+6 19:39:59.163", 23); assertQuery("SELECT INTERVAL '163:59.163' MINUTE TO SECOND", "INTERVAL '163:59.163' MINUTE TO SECOND", "interval_minute_to_second", - "PT2H43M59.163S", 23); + "PT2H43M59.163S", "+0 02:43:59.163", 23); } - @SuppressWarnings({ "unchecked" }) - private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize) throws IOException { + private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize) + throws IOException { + assertQuery(sql, columnName, columnType, columnValue, null, displaySize); + } + + private void assertQuery(String sql, String columnName, String columnType, Object columnValue, Object cliColumnValue, int displaySize) + throws IOException { for (Mode mode : Mode.values()) { - Map response = runSql(mode.toString(), sql); - List columns = (ArrayList) response.get("columns"); - assertEquals(1, columns.size()); + boolean isCliCheck = mode == CLI && cliColumnValue != null; + assertQuery(sql, columnName, columnType, isCliCheck ? cliColumnValue : columnValue, displaySize, mode); + } + } + + @SuppressWarnings({ "unchecked" }) + private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize, Mode mode) + throws IOException { + Map response = runSql(mode.toString(), sql); + List columns = (ArrayList) response.get("columns"); + assertEquals(1, columns.size()); - Map column = (HashMap) columns.get(0); - assertEquals(columnName, column.get("name")); - assertEquals(columnType, column.get("type")); - if (mode != Mode.PLAIN) { - assertEquals(3, column.size()); - assertEquals(displaySize, column.get("display_size")); - } else { - assertEquals(2, column.size()); - } - - List rows = (ArrayList) response.get("rows"); - assertEquals(1, rows.size()); - List row = (ArrayList) rows.get(0); - assertEquals(1, row.size()); + Map column = (HashMap) columns.get(0); + assertEquals(columnName, column.get("name")); + assertEquals(columnType, column.get("type")); + if (Mode.isDriver(mode)) { + assertEquals(3, column.size()); + assertEquals(displaySize, column.get("display_size")); + } else { + assertEquals(2, column.size()); + } + + List rows = (ArrayList) response.get("rows"); + assertEquals(1, rows.size()); + List row = (ArrayList) rows.get(0); + assertEquals(1, row.size()); - // from xcontent we can get float or double, depending on the conversion - // method of the specific xcontent format implementation - if (columnValue instanceof Float && row.get(0) instanceof Double) { - assertEquals(columnValue, (float)((Number) row.get(0)).doubleValue()); - } else { - assertEquals(columnValue, row.get(0)); - } + // from xcontent we can get float or double, depending on the conversion + // method of the specific xcontent format implementation + if (columnValue instanceof Float && row.get(0) instanceof Double) { + assertEquals(columnValue, (float)((Number) row.get(0)).doubleValue()); + } else { + assertEquals(columnValue, row.get(0)); } } private Map runSql(String mode, String sql) throws IOException { - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); String requestContent = "{\"query\":\"" + sql + "\"" + mode(mode) + "}"; String format = randomFrom(XContentType.values()).name().toLowerCase(Locale.ROOT); diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java index e91f9fc727238..a5b8f5cb4766a 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.sql.qa.jdbc; import org.apache.logging.log4j.Logger; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.StringUtils; @@ -19,6 +19,8 @@ import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; + public abstract class JdbcTestUtils { public static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE"; @@ -131,7 +133,7 @@ public static void logLikeCLI(ResultSet rs, Logger logger) throws SQLException { data.add(entry); } - CliFormatter formatter = new CliFormatter(cols, data); + BasicFormatter formatter = new BasicFormatter(cols, data, CLI); logger.info("\n" + formatter.formatWithHeader(cols, data)); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index 331c02ffcb193..6f070491df168 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -49,6 +49,8 @@ * user rather than to the JDBC driver or CLI. */ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTestCase { + + public static String SQL_QUERY_REST_ENDPOINT = org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; /** * Builds that map that is returned in the header for each column. */ @@ -309,7 +311,7 @@ private Map runSql(String mode, String sql, String suffix) throw } protected Map runSql(HttpEntity sql, String suffix) throws IOException { - Request request = new Request("POST", "/_sql" + suffix); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT + suffix); request.addParameter("error_trace", "true"); // Helps with debugging in case something crazy happens on the server. request.addParameter("pretty", "true"); // Improves error reporting readability if (randomBoolean()) { @@ -640,7 +642,7 @@ private Tuple runSqlAsText(String sql, String accept) throws IOE * rather than the {@code format} parameter. */ private Tuple runSqlAsText(String suffix, HttpEntity entity, String accept) throws IOException { - Request request = new Request("POST", "/_sql" + suffix); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT + suffix); request.addParameter("error_trace", "true"); request.setEntity(entity); RequestOptions.Builder options = request.getOptions().toBuilder(); @@ -658,7 +660,7 @@ private Tuple runSqlAsText(String suffix, HttpEntity entity, Str * rather than an {@code Accept} header. */ private Tuple runSqlAsTextFormat(String sql, String format) throws IOException { - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); request.addParameter("error_trace", "true"); request.addParameter("format", format); request.setJsonEntity("{\"query\":\"" + sql + "\"}"); diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java index 42f9e59840f86..4d864961e8ecd 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java @@ -26,6 +26,9 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; +import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_STATS_REST_ENDPOINT; +import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_TRANSLATE_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; public abstract class RestSqlUsageTestCase extends ESRestTestCase { @@ -226,7 +229,7 @@ private void index(List docs) throws IOException { } private Map getStats() throws UnsupportedOperationException, IOException { - Request request = new Request("GET", "/_sql/stats"); + Request request = new Request("GET", SQL_STATS_REST_ENDPOINT); Map responseAsMap; try (InputStream content = client().performRequest(request).getEntity().getContent()) { responseAsMap = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); @@ -236,7 +239,7 @@ private Map getStats() throws UnsupportedOperationException, IOE } private void runTranslate(String sql) throws IOException { - Request request = new Request("POST", "/_sql/translate"); + Request request = new Request("POST", SQL_TRANSLATE_REST_ENDPOINT); if (randomBoolean()) { // We default to JSON but we force it randomly for extra coverage request.addParameter("format", "json"); @@ -255,9 +258,10 @@ private void runSql(String sql) throws IOException { String mode = Mode.PLAIN.toString(); if (clientType.equals(ClientType.JDBC.toString())) { mode = Mode.JDBC.toString(); - } - if (clientType.startsWith(ClientType.ODBC.toString())) { + } else if (clientType.startsWith(ClientType.ODBC.toString())) { mode = Mode.ODBC.toString(); + } else if (clientType.equals(ClientType.CLI.toString())) { + mode = Mode.CLI.toString(); } runSql(mode, clientType, sql); @@ -276,7 +280,7 @@ private void assertTranslateQueryMetric(int expected, Map respon } private void runSql(String mode, String restClient, String sql) throws IOException { - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); request.addParameter("error_trace", "true"); // Helps with debugging in case something crazy happens on the server. request.addParameter("pretty", "true"); // Improves error reporting readability if (randomBoolean()) { diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..7a4b68f8d857d --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +e3a95ff3cbd96e2c05b90932c20ca6374cdcdbe9 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index efbb9ada534a5..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad32720fe677becb93a26692338b63754613aa50 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/BasicFormatter.java similarity index 77% rename from x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java rename to x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/BasicFormatter.java index f3f63fd18a275..fec2a3ee621e1 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/BasicFormatter.java @@ -14,26 +14,46 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Objects; +import java.util.function.Function; /** - * Formats {@link SqlQueryResponse} for the CLI. {@linkplain Writeable} so + * Formats {@link SqlQueryResponse} for the CLI and for the TEXT format. {@linkplain Writeable} so * that its state can be saved between pages of results. */ -public class CliFormatter implements Writeable { +public class BasicFormatter implements Writeable { /** * The minimum width for any column in the formatted results. */ private static final int MIN_COLUMN_WIDTH = 15; private int[] width; + + public enum FormatOption { + CLI(Objects::toString), + TEXT(StringUtils::toString); + + private final Function apply; + + FormatOption(Function apply) { + this.apply = l -> l == null ? null : apply.apply(l); + } + + public final String apply(Object l) { + return apply.apply(l); + } + } + + private final FormatOption formatOption; /** - * Create a new {@linkplain CliFormatter} for formatting responses similar + * Create a new {@linkplain BasicFormatter} for formatting responses similar * to the provided columns and rows. */ - public CliFormatter(List columns, List> rows) { + public BasicFormatter(List columns, List> rows, FormatOption formatOption) { // Figure out the column widths: // 1. Start with the widths of the column names + this.formatOption = formatOption; width = new int[columns.size()]; for (int i = 0; i < width.length; i++) { // TODO read the width from the data type? @@ -43,24 +63,24 @@ public CliFormatter(List columns, List> rows) { // 2. Expand columns to fit the largest value for (List row : rows) { for (int i = 0; i < width.length; i++) { - // TODO are we sure toString is correct here? What about dates that come back as longs. - // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 - width[i] = Math.max(width[i], StringUtils.toString(row.get(i)).length()); + width[i] = Math.max(width[i], formatOption.apply(row.get(i)).length()); } } } - public CliFormatter(StreamInput in) throws IOException { + public BasicFormatter(StreamInput in) throws IOException { width = in.readIntArray(); + formatOption = in.readEnum(FormatOption.class); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeIntArray(width); + out.writeEnum(formatOption); } - + /** - * Format the provided {@linkplain SqlQueryResponse} for the CLI + * Format the provided {@linkplain SqlQueryResponse} for the set format * including the header lines. */ public String formatWithHeader(List columns, List> rows) { @@ -103,7 +123,7 @@ public String formatWithHeader(List columns, List> rows } /** - * Format the provided {@linkplain SqlQueryResponse} for the CLI + * Format the provided {@linkplain SqlQueryResponse} for the set format * without the header lines. */ public String formatWithoutHeader(List> rows) { @@ -116,9 +136,7 @@ private String formatWithoutHeader(StringBuilder sb, List> rows) { if (i > 0) { sb.append('|'); } - // TODO are we sure toString is correct here? What about dates that come back as longs. - // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 - String string = StringUtils.toString(row.get(i)); + String string = formatOption.apply(row.get(i)); if (string.length() <= width[i]) { // Pad sb.append(string); @@ -159,12 +177,12 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - CliFormatter that = (CliFormatter) o; - return Arrays.equals(width, that.width); + BasicFormatter that = (BasicFormatter) o; + return Arrays.equals(width, that.width) && formatOption == that.formatOption; } @Override public int hashCode() { - return Arrays.hashCode(width); + return Objects.hash(width, formatOption); } } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java index 7301e86befa02..465b405d01ae8 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java @@ -24,6 +24,7 @@ import static java.util.Collections.unmodifiableList; import static org.elasticsearch.xpack.sql.action.AbstractSqlQueryRequest.CURSOR; +import static org.elasticsearch.xpack.sql.proto.Mode.CLI; /** * Response to perform an sql query @@ -36,6 +37,7 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject private List columns; // TODO investigate reusing Page here - it probably is much more efficient private List> rows; + private static final String INTERVAL_CLASS_NAME = "Interval"; public SqlQueryResponse() { } @@ -173,7 +175,12 @@ public static XContentBuilder value(XContentBuilder builder, Mode mode, Object v ZonedDateTime zdt = (ZonedDateTime) value; // use the ISO format builder.value(StringUtils.toString(zdt)); - } else { + } else if (mode == CLI && value != null && value.getClass().getSuperclass().getSimpleName().equals(INTERVAL_CLASS_NAME)) { + // use the SQL format for intervals when sending back the response for CLI + // all other clients will receive ISO 8601 formatted intervals + builder.value(value.toString()); + } + else { builder.value(value); } return builder; diff --git a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java index 4e41dddb46c3c..c9bdc8d670c6d 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java @@ -68,7 +68,7 @@ public void testClearCursorRequestParser() throws IOException { request = generateRequest("{\"cursor\" : \"whatever\", \"client_id\" : \"CLI\"}", SqlClearCursorRequest::fromXContent); - assertEquals("cli", request.clientId()); + assertNull(request.clientId()); assertEquals(Mode.PLAIN, request.mode()); assertEquals("whatever", request.getCursor()); diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java index 5cb8bc0a4cd36..86b5cf6c36ef2 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java @@ -5,26 +5,29 @@ */ package org.elasticsearch.xpack.sql.cli.command; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.cli.CliTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.JreHttpUrlConnection; +import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.sql.SQLException; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; + public class ServerQueryCliCommand extends AbstractServerCliCommand { @Override protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String line) { SqlQueryResponse response = null; HttpClient cliClient = cliSession.getClient(); - CliFormatter cliFormatter; + BasicFormatter formatter; String data; try { response = cliClient.queryInit(line, cliSession.getFetchSize()); - cliFormatter = new CliFormatter(response.columns(), response.rows()); - data = cliFormatter.formatWithHeader(response.columns(), response.rows()); + formatter = new BasicFormatter(response.columns(), response.rows(), CLI); + data = formatter.formatWithHeader(response.columns(), response.rows()); while (true) { handleText(terminal, data); if (response.cursor().isEmpty()) { @@ -36,7 +39,7 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String l terminal.println(cliSession.getFetchSeparator()); } response = cliSession.getClient().nextPage(response.cursor()); - data = cliFormatter.formatWithoutHeader(response.rows()); + data = formatter.formatWithoutHeader(response.rows()); } } catch (SQLException e) { if (JreHttpUrlConnection.SQL_STATE_BAD_SERVER.equals(e.getSQLState())) { @@ -46,7 +49,7 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String l } if (response != null) { try { - cliClient.queryClose(response.cursor()); + cliClient.queryClose(response.cursor(), Mode.CLI); } catch (SQLException ex) { terminal.error("Could not close cursor", ex.getMessage()); } diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java index 498bbf7754c8d..9d4ded4a39c14 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.sql.cli.TestTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.sql.SQLException; @@ -93,14 +94,14 @@ public void testCursorCleanupOnError() throws Exception { cliSession.setFetchSize(15); when(client.queryInit("test query", 15)).thenReturn(fakeResponse("my_cursor1", true, "first")); when(client.nextPage("my_cursor1")).thenThrow(new SQLException("test exception")); - when(client.queryClose("my_cursor1")).thenReturn(true); + when(client.queryClose("my_cursor1", Mode.CLI)).thenReturn(true); ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "test query")); assertEquals(" field \n---------------\nfirst \n" + "Bad request [test exception]\n", testTerminal.toString()); verify(client, times(1)).queryInit(eq("test query"), eq(15)); verify(client, times(1)).nextPage(any()); - verify(client, times(1)).queryClose(eq("my_cursor1")); + verify(client, times(1)).queryClose(eq("my_cursor1"), eq(Mode.CLI)); verifyNoMoreInteractions(client); } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java index 4fe6a39820bc7..c3f35aefd65f4 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -36,8 +36,6 @@ import java.util.Collections; import java.util.function.Function; -import static org.elasticsearch.xpack.sql.proto.RequestInfo.CLI; - /** * A specialized high-level REST client with support for SQL-related functions. * Similar to JDBC and the underlying HTTP connection, this class is not thread-safe @@ -65,10 +63,10 @@ public MainResponse serverInfo() throws SQLException { public SqlQueryResponse queryInit(String query, int fetchSize) throws SQLException { // TODO allow customizing the time zone - this is what session set/reset/get should be about - // method called only from CLI. "client_id" is set to "cli" + // method called only from CLI SqlQueryRequest sqlRequest = new SqlQueryRequest(query, Collections.emptyList(), null, ZoneId.of("Z"), fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()), TimeValue.timeValueMillis(cfg.pageTimeout()), - new RequestInfo(Mode.PLAIN, CLI)); + new RequestInfo(Mode.CLI)); return query(sqlRequest); } @@ -77,15 +75,15 @@ public SqlQueryResponse query(SqlQueryRequest sqlRequest) throws SQLException { } public SqlQueryResponse nextPage(String cursor) throws SQLException { - // method called only from CLI. "client_id" is set to "cli" + // method called only from CLI SqlQueryRequest sqlRequest = new SqlQueryRequest(cursor, TimeValue.timeValueMillis(cfg.queryTimeout()), - TimeValue.timeValueMillis(cfg.pageTimeout()), new RequestInfo(Mode.PLAIN, CLI)); + TimeValue.timeValueMillis(cfg.pageTimeout()), new RequestInfo(Mode.CLI)); return post(Protocol.SQL_QUERY_REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); } - public boolean queryClose(String cursor) throws SQLException { + public boolean queryClose(String cursor, Mode mode) throws SQLException { SqlClearCursorResponse response = post(Protocol.CLEAR_CURSOR_REST_ENDPOINT, - new SqlClearCursorRequest(cursor, new RequestInfo(Mode.PLAIN)), + new SqlClearCursorRequest(cursor, new RequestInfo(mode)), SqlClearCursorResponse::fromXContent); return response.isSucceeded(); } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java index 5b4991bfba1c0..59a6e82e9874d 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java @@ -35,6 +35,7 @@ import javax.sql.rowset.serial.SerialException; import static java.util.Collections.emptyMap; +import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; /** * Low-level http client using the built-in {@link HttpURLConnection}. @@ -47,7 +48,8 @@ public class JreHttpUrlConnection implements Closeable { * error. */ public static final String SQL_STATE_BAD_SERVER = "bad_server"; - private static final String SQL_NOT_AVAILABLE_ERROR_MESSAGE = "request [/_sql] contains unrecognized parameter: [mode]"; + private static final String SQL_NOT_AVAILABLE_ERROR_MESSAGE = "request [" + SQL_QUERY_REST_ENDPOINT + + "] contains unrecognized parameter: [mode]"; public static R http(String path, String query, ConnectionConfiguration cfg, Function handler) { final URI uriPath = cfg.baseUri().resolve(path); // update path if needed diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java index a301b41d3887a..26eb4867b3509 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java @@ -12,6 +12,7 @@ * SQL protocol mode */ public enum Mode { + CLI, PLAIN, JDBC, ODBC; diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java index b860832856a82..97a3622a64f1f 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java @@ -13,7 +13,6 @@ import java.util.Set; public class RequestInfo { - public static final String CLI = "cli"; private static final String CANVAS = "canvas"; public static final String ODBC_32 = "odbc32"; private static final String ODBC_64 = "odbc64"; @@ -22,7 +21,6 @@ public class RequestInfo { static { Set clientIds = new HashSet<>(4); - clientIds.add(CLI); clientIds.add(CANVAS); clientIds.add(ODBC_32); clientIds.add(ODBC_64); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java index 9f569206438d2..c80b399d447e6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java @@ -65,6 +65,7 @@ public class SqlPlugin extends Plugin implements ActionPlugin { } break; case PLAIN: + case CLI: if (licenseState.isSqlAllowed() == false) { throw LicenseUtils.newComplianceException(XPackField.SQL); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java index 34c0f1c6d74f7..62963a99b2a98 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java @@ -7,7 +7,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.action.SqlQueryResponse; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.session.Cursor; @@ -21,6 +21,8 @@ import java.util.Objects; import java.util.function.Function; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.TEXT; + /** * Templating class for displaying SQL responses in text formats. */ @@ -40,21 +42,21 @@ enum TextFormat { PLAIN_TEXT() { @Override String format(Cursor cursor, RestRequest request, SqlQueryResponse response) { - final CliFormatter formatter; - if (cursor instanceof CliFormatterCursor) { - formatter = ((CliFormatterCursor) cursor).getCliFormatter(); + final BasicFormatter formatter; + if (cursor instanceof TextFormatterCursor) { + formatter = ((TextFormatterCursor) cursor).getFormatter(); return formatter.formatWithoutHeader(response.rows()); } else { - formatter = new CliFormatter(response.columns(), response.rows()); + formatter = new BasicFormatter(response.columns(), response.rows(), TEXT); return formatter.formatWithHeader(response.columns(), response.rows()); } } @Override Cursor wrapCursor(Cursor oldCursor, SqlQueryResponse response) { - CliFormatter formatter = (oldCursor instanceof CliFormatterCursor) ? - ((CliFormatterCursor) oldCursor).getCliFormatter() : new CliFormatter(response.columns(), response.rows()); - return CliFormatterCursor.wrap(super.wrapCursor(oldCursor, response), formatter); + BasicFormatter formatter = (oldCursor instanceof TextFormatterCursor) ? + ((TextFormatterCursor) oldCursor).getFormatter() : new BasicFormatter(response.columns(), response.rows(), TEXT); + return TextFormatterCursor.wrap(super.wrapCursor(oldCursor, response), formatter); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java similarity index 78% rename from x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java rename to x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java index b226e899e4d09..4ab1d77fe21ff 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java @@ -10,7 +10,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.RowSet; @@ -21,31 +21,31 @@ /** * The cursor that wraps all necessary information for textual representation of the result table */ -public class CliFormatterCursor implements Cursor { +public class TextFormatterCursor implements Cursor { public static final String NAME = "f"; private final Cursor delegate; - private final CliFormatter formatter; + private final BasicFormatter formatter; /** * If the newCursor is empty, returns an empty cursor. Otherwise, creates a new - * CliFormatterCursor that wraps the newCursor. + * TextFormatterCursor that wraps the newCursor. */ - public static Cursor wrap(Cursor newCursor, CliFormatter formatter) { + public static Cursor wrap(Cursor newCursor, BasicFormatter formatter) { if (newCursor == EMPTY) { return EMPTY; } - return new CliFormatterCursor(newCursor, formatter); + return new TextFormatterCursor(newCursor, formatter); } - private CliFormatterCursor(Cursor delegate, CliFormatter formatter) { + private TextFormatterCursor(Cursor delegate, BasicFormatter formatter) { this.delegate = delegate; this.formatter = formatter; } - public CliFormatterCursor(StreamInput in) throws IOException { + public TextFormatterCursor(StreamInput in) throws IOException { delegate = in.readNamedWriteable(Cursor.class); - formatter = new CliFormatter(in); + formatter = new BasicFormatter(in); } @Override @@ -54,7 +54,7 @@ public void writeTo(StreamOutput out) throws IOException { formatter.writeTo(out); } - public CliFormatter getCliFormatter() { + public BasicFormatter getFormatter() { return formatter; } @@ -81,7 +81,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - CliFormatterCursor that = (CliFormatterCursor) o; + TextFormatterCursor that = (TextFormatterCursor) o; return Objects.equals(delegate, that.delegate) && Objects.equals(formatter, that.formatter); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java index ada855ec1511d..936f565827980 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.querydsl.agg; -import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; @@ -42,7 +41,7 @@ private GroupByDateHistogram(String id, String fieldName, ScriptTemplate script, protected CompositeValuesSourceBuilder createSourceBuilder() { return new DateHistogramValuesSourceBuilder(id()) .interval(interval) - .timeZone(DateUtils.zoneIdToDateTimeZone(zoneId)); + .timeZone(zoneId); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java index f8d0393303d64..25989ab0af7d2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractors; import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; import org.elasticsearch.xpack.sql.expression.literal.Intervals; -import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor; +import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor; import java.io.ByteArrayOutputStream; import java.io.OutputStream; @@ -47,7 +47,7 @@ public static List getNamedWriteables() { entries.add(new NamedWriteableRegistry.Entry(Cursor.class, EmptyCursor.NAME, in -> Cursor.EMPTY)); entries.add(new NamedWriteableRegistry.Entry(Cursor.class, ScrollCursor.NAME, ScrollCursor::new)); entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CompositeAggregationCursor.NAME, CompositeAggregationCursor::new)); - entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CliFormatterCursor.NAME, CliFormatterCursor::new)); + entries.add(new NamedWriteableRegistry.Entry(Cursor.class, TextFormatterCursor.NAME, TextFormatterCursor::new)); // plus all their dependencies entries.addAll(Processors.getNamedWriteables()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/CliFormatterTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/BasicFormatterTests.java similarity index 63% rename from x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/CliFormatterTests.java rename to x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/BasicFormatterTests.java index e1b551d2aeddb..fcab7eedca801 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/CliFormatterTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/BasicFormatterTests.java @@ -6,28 +6,32 @@ package org.elasticsearch.xpack.sql.action; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.Mode; import java.util.Arrays; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; import static org.hamcrest.Matchers.arrayWithSize; -public class CliFormatterTests extends ESTestCase { - private final SqlQueryResponse firstResponse = new SqlQueryResponse("", Mode.PLAIN, +public class BasicFormatterTests extends ESTestCase { + private final FormatOption format = randomFrom(FormatOption.values()); + private final SqlQueryResponse firstResponse = new SqlQueryResponse("", format == CLI ? Mode.CLI : Mode.PLAIN, Arrays.asList( new ColumnInfo("", "foo", "string", 0), new ColumnInfo("", "bar", "long", 15), new ColumnInfo("", "15charwidename!", "double", 25), new ColumnInfo("", "superduperwidename!!!", "double", 25), - new ColumnInfo("", "baz", "keyword", 0)), + new ColumnInfo("", "baz", "keyword", 0), + new ColumnInfo("", "date", "datetime", 24)), Arrays.asList( - Arrays.asList("15charwidedata!", 1, 6.888, 12, "rabbit"), - Arrays.asList("dog", 1.7976931348623157E308, 123124.888, 9912, "goat"))); - private final CliFormatter formatter = new CliFormatter(firstResponse.columns(), firstResponse.rows()); + Arrays.asList("15charwidedata!", 1, 6.888, 12, "rabbit", "1953-09-02T00:00:00.000Z"), + Arrays.asList("dog", 1.7976931348623157E308, 123124.888, 9912, "goat", "2000-03-15T21:34:37.443Z"))); + private final BasicFormatter formatter = new BasicFormatter(firstResponse.columns(), firstResponse.rows(), format); /** - * Tests for {@link CliFormatter#formatWithHeader}, values + * Tests for {@link BasicFormatter#formatWithHeader}, values * of exactly the minimum column size, column names of exactly * the minimum column size, column headers longer than the * minimum column size, and values longer than the minimum @@ -36,24 +40,30 @@ public class CliFormatterTests extends ESTestCase { public void testFormatWithHeader() { String[] result = formatter.formatWithHeader(firstResponse.columns(), firstResponse.rows()).split("\n"); assertThat(result, arrayWithSize(4)); - assertEquals(" foo | bar |15charwidename!|superduperwidename!!!| baz ", result[0]); - assertEquals("---------------+----------------------+---------------+---------------------+---------------", result[1]); - assertEquals("15charwidedata!|1 |6.888 |12 |rabbit ", result[2]); - assertEquals("dog |1.7976931348623157E308|123124.888 |9912 |goat ", result[3]); + assertEquals(" foo | bar |15charwidename!|superduperwidename!!!| baz |" + + " date ", result[0]); + assertEquals("---------------+----------------------+---------------+---------------------+---------------+" + + "------------------------", result[1]); + assertEquals("15charwidedata!|1 |6.888 |12 |rabbit |" + + "1953-09-02T00:00:00.000Z", result[2]); + assertEquals("dog |1.7976931348623157E308|123124.888 |9912 |goat |" + + "2000-03-15T21:34:37.443Z", result[3]); } /** - * Tests for {@link CliFormatter#formatWithoutHeader} and + * Tests for {@link BasicFormatter#formatWithoutHeader} and * truncation of long columns. */ public void testFormatWithoutHeader() { String[] result = formatter.formatWithoutHeader( Arrays.asList( - Arrays.asList("ohnotruncateddata", 4, 1, 77, "wombat"), - Arrays.asList("dog", 2, 123124.888, 9912, "goat"))).split("\n"); + Arrays.asList("ohnotruncateddata", 4, 1, 77, "wombat", "1955-01-21T01:02:03.342Z"), + Arrays.asList("dog", 2, 123124.888, 9912, "goat", "2231-12-31T23:59:59.999Z"))).split("\n"); assertThat(result, arrayWithSize(2)); - assertEquals("ohnotruncatedd~|4 |1 |77 |wombat ", result[0]); - assertEquals("dog |2 |123124.888 |9912 |goat ", result[1]); + assertEquals("ohnotruncatedd~|4 |1 |77 |wombat |" + + "1955-01-21T01:02:03.342Z", result[0]); + assertEquals("dog |2 |123124.888 |9912 |goat |" + + "2231-12-31T23:59:59.999Z", result[1]); } /** diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java index f3e27d852b3fe..67b0f2badd883 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java @@ -13,9 +13,9 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; import org.elasticsearch.xpack.sql.TestUtils; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.action.SqlQueryResponse; -import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor; +import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.session.Cursor; @@ -32,6 +32,8 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyZeroInteractions; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.TEXT; public class CursorTests extends ESTestCase { @@ -79,12 +81,20 @@ static Cursor randomNonEmptyCursor() { () -> { SqlQueryResponse response = createRandomSqlResponse(); if (response.columns() != null && response.rows() != null) { - return CliFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), - new CliFormatter(response.columns(), response.rows())); + return TextFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), + new BasicFormatter(response.columns(), response.rows(), CLI)); + } else { + return ScrollCursorTests.randomScrollCursor(); + } + }, + () -> { + SqlQueryResponse response = createRandomSqlResponse(); + if (response.columns() != null && response.rows() != null) { + return TextFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), + new BasicFormatter(response.columns(), response.rows(), TEXT)); } else { return ScrollCursorTests.randomScrollCursor(); } - } ); return cursorSupplier.get(); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java index f8996966fa1f6..cfa429e91e83d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java @@ -60,4 +60,4 @@ protected int doHashCode() { protected boolean doEquals(Object obj) { throw new UnsupportedOperationException(); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index fe6885b1c9d72..dd26db984cdf5 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -31,6 +31,7 @@ dependencies { compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile "org.elasticsearch.plugin:x-pack-ilm:${version}" // watcher deps compile 'com.googlecode.owasp-java-html-sanitizer:owasp-java-html-sanitizer:r239' diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 7b17d7f9973d1..d5eea54a80c85 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -270,7 +270,7 @@ public Collection createComponents(Client client, ClusterService cluster throw new UncheckedIOException(e); } - new WatcherIndexTemplateRegistry(clusterService, threadPool, client); + new WatcherIndexTemplateRegistry(clusterService, threadPool, client, xContentRegistry); // http client httpClient = new HttpClient(settings, getSslService(), cryptoService, clusterService); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java index 3258a7a481bb1..0fdb2b3a17d13 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java @@ -18,13 +18,20 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyUtils; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; import org.elasticsearch.xpack.core.template.TemplateUtils; import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; import java.nio.charset.StandardCharsets; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.Executor; @@ -46,17 +53,23 @@ public class WatcherIndexTemplateRegistry implements ClusterStateListener { TEMPLATE_CONFIG_TRIGGERED_WATCHES, TEMPLATE_CONFIG_WATCH_HISTORY, TEMPLATE_CONFIG_WATCHES }; + public static final PolicyConfig POLICY_WATCH_HISTORY = new PolicyConfig("watch-history-ilm-policy", "/watch-history-ilm-policy.json"); + private static final Logger logger = LogManager.getLogger(WatcherIndexTemplateRegistry.class); private final Client client; private final ThreadPool threadPool; private final TemplateConfig[] indexTemplates; + private final NamedXContentRegistry xContentRegistry; private final ConcurrentMap templateCreationsInProgress = new ConcurrentHashMap<>(); + private final AtomicBoolean historyPolicyCreationInProgress = new AtomicBoolean(); - public WatcherIndexTemplateRegistry(ClusterService clusterService, ThreadPool threadPool, Client client) { + public WatcherIndexTemplateRegistry(ClusterService clusterService, ThreadPool threadPool, Client client, + NamedXContentRegistry xContentRegistry) { this.client = client; this.threadPool = threadPool; this.indexTemplates = TEMPLATE_CONFIGS; + this.xContentRegistry = xContentRegistry; clusterService.addListener(this); } @@ -82,6 +95,7 @@ public void clusterChanged(ClusterChangedEvent event) { if (event.localNodeMaster() || localNodeVersionAfterMaster) { addTemplatesIfMissing(state); + addIndexLifecyclePolicyIfMissing(state); } } @@ -127,6 +141,54 @@ public void onFailure(Exception e) { }); } + // Package visible for testing + LifecyclePolicy loadWatcherHistoryPolicy() { + return LifecyclePolicyUtils.loadPolicy(POLICY_WATCH_HISTORY.policyName, POLICY_WATCH_HISTORY.fileName, xContentRegistry); + } + + private void addIndexLifecyclePolicyIfMissing(ClusterState state) { + if (historyPolicyCreationInProgress.compareAndSet(false, true)) { + final LifecyclePolicy policyOnDisk = loadWatcherHistoryPolicy(); + + Optional maybeMeta = Optional.ofNullable(state.metaData().custom(IndexLifecycleMetadata.TYPE)); + final boolean needsUpdating = maybeMeta + .flatMap(ilmMeta -> Optional.ofNullable(ilmMeta.getPolicies().get(policyOnDisk.getName()))) + .isPresent() == false; // If there is no policy then one needs to be put; + + if (needsUpdating) { + putPolicy(policyOnDisk, historyPolicyCreationInProgress); + } else { + historyPolicyCreationInProgress.set(false); + } + } + } + + private void putPolicy(final LifecyclePolicy policy, final AtomicBoolean creationCheck) { + final Executor executor = threadPool.generic(); + executor.execute(() -> { + PutLifecycleAction.Request request = new PutLifecycleAction.Request(policy); + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, request, + new ActionListener() { + @Override + public void onResponse(PutLifecycleAction.Response response) { + creationCheck.set(false); + if (response.isAcknowledged() == false) { + logger.error("error adding watcher index lifecycle policy [{}], request was not acknowledged", + policy.getName()); + } + } + + @Override + public void onFailure(Exception e) { + creationCheck.set(false); + logger.error(new ParameterizedMessage("error adding watcher index lifecycle policy [{}]", + policy.getName()), e); + } + }, (req, listener) -> new XPackClient(client).ilmClient().putLifecyclePolicy(req, listener)); + }); + } + public static boolean validate(ClusterState state) { return state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME) && state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME) && @@ -153,9 +215,19 @@ public String getTemplateName() { public byte[] load() { String template = TemplateUtils.loadTemplate("/" + fileName + ".json", WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION, - Pattern.quote("${xpack.watcher.template.version}")); + Pattern.quote("${xpack.watcher.template.version}")); assert template != null && template.length() > 0; return template.getBytes(StandardCharsets.UTF_8); } } + public static class PolicyConfig { + + private final String policyName; + private String fileName; + + PolicyConfig(String templateName, String fileName) { + this.policyName = templateName; + this.fileName = fileName; + } + } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java index e93a86b93eb23..60ca2b83b2f85 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -21,20 +22,36 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; import org.junit.Before; import org.mockito.ArgumentCaptor; +import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.elasticsearch.mock.orig.Mockito.verify; import static org.elasticsearch.mock.orig.Mockito.when; @@ -50,6 +67,7 @@ public class WatcherIndexTemplateRegistryTests extends ESTestCase { private WatcherIndexTemplateRegistry registry; + private NamedXContentRegistry xContentRegistry; private Client client; @Before @@ -72,7 +90,13 @@ public void createRegistryAndClient() { }).when(indicesAdminClient).putTemplate(any(PutIndexTemplateRequest.class), any(ActionListener.class)); ClusterService clusterService = mock(ClusterService.class); - registry = new WatcherIndexTemplateRegistry(clusterService, threadPool, client); + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), + (p) -> TimeseriesLifecycleType.INSTANCE), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse))); + xContentRegistry = new NamedXContentRegistry(entries); + registry = new WatcherIndexTemplateRegistry(clusterService, threadPool, client, xContentRegistry); } public void testThatNonExistingTemplatesAreAddedImmediately() { @@ -91,6 +115,44 @@ public void testThatNonExistingTemplatesAreAddedImmediately() { verify(client.admin().indices(), times(4)).putTemplate(argumentCaptor.capture(), anyObject()); } + public void testThatNonExistingPoliciesAreAddedImmediately() { + DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), nodes); + registry.clusterChanged(event); + verify(client, times(1)).execute(eq(PutLifecycleAction.INSTANCE), anyObject(), anyObject()); + } + + public void testPolicyAlreadyExists() { + DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + Map policyMap = new HashMap<>(); + LifecyclePolicy policy = registry.loadWatcherHistoryPolicy(); + policyMap.put(policy.getName(), policy); + ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), policyMap, nodes); + registry.clusterChanged(event); + verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), anyObject(), anyObject()); + } + + public void testPolicyAlreadyExistsButDiffers() throws IOException { + DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + Map policyMap = new HashMap<>(); + String policyStr = "{\"phases\":{\"delete\":{\"min_age\":\"1m\",\"actions\":{\"delete\":{}}}}}"; + LifecyclePolicy policy = registry.loadWatcherHistoryPolicy(); + try (XContentParser parser = XContentType.JSON.xContent() + .createParser(xContentRegistry, LoggingDeprecationHandler.THROW_UNSUPPORTED_OPERATION, policyStr)) { + LifecyclePolicy different = LifecyclePolicy.parse(parser, policy.getName()); + policyMap.put(policy.getName(), different); + ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), policyMap, nodes); + registry.clusterChanged(event); + verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), anyObject(), anyObject()); + } + } + public void testThatTemplatesExist() { assertThat(WatcherIndexTemplateRegistry.validate(createClusterState(".watch-history")), is(false)); assertThat(WatcherIndexTemplateRegistry.validate(createClusterState(".watch-history", ".triggered_watches", ".watches")), @@ -141,6 +203,12 @@ public void testThatMissingMasterNodeDoesNothing() { } private ClusterChangedEvent createClusterChangedEvent(List existingTemplateNames, DiscoveryNodes nodes) { + return createClusterChangedEvent(existingTemplateNames, Collections.emptyMap(), nodes); + } + + private ClusterChangedEvent createClusterChangedEvent(List existingTemplateNames, + Map existingPolicies, + DiscoveryNodes nodes) { ClusterChangedEvent event = mock(ClusterChangedEvent.class); when(event.localNodeMaster()).thenReturn(nodes.isLocalNodeElectedMaster()); ClusterState cs = mock(ClusterState.class); @@ -158,6 +226,10 @@ private ClusterChangedEvent createClusterChangedEvent(List existingTempl } when(metaData.getTemplates()).thenReturn(indexTemplates.build()); + + IndexLifecycleMetadata ilmMeta = mock(IndexLifecycleMetadata.class); + when(ilmMeta.getPolicies()).thenReturn(existingPolicies); + when(metaData.custom(anyObject())).thenReturn(ilmMeta); when(cs.metaData()).thenReturn(metaData); return event; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 615fc8fbd08f9..6737de19baa13 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -49,6 +49,7 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycle; import org.elasticsearch.xpack.watcher.history.HistoryStore; import org.elasticsearch.xpack.watcher.notification.email.Authentication; import org.elasticsearch.xpack.watcher.notification.email.Email; @@ -161,6 +162,8 @@ protected List> pluginTypes() { } types.add(CommonAnalysisPlugin.class); + // ILM is required for watcher template index settings + types.add(IndexLifecycle.class); return types; } diff --git a/x-pack/qa/audit-tests/build.gradle b/x-pack/qa/audit-tests/build.gradle index 126e3834bab4a..6afe5f01ae1e3 100644 --- a/x-pack/qa/audit-tests/build.gradle +++ b/x-pack/qa/audit-tests/build.gradle @@ -16,7 +16,7 @@ task copyXPackPluginProps(type: Copy) { // wth is this? project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) integTestCluster { - distribution 'zip' + distribution 'default' setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index d154a1e248633..c6c4634e58b61 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -186,6 +186,10 @@ subprojects { setting 'xpack.watcher.encrypt_sensitive_data', 'true' } + if (version.onOrAfter('6.6.0')) { + setting 'ccr.auto_follow.wait_for_metadata_timeout', '1s' + } + // Old versions of the code contain an invalid assertion that trips // during tests. Versions 5.6.9 and 6.2.4 have been fixed by removing // the assertion, but this is impossible for released versions. diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java new file mode 100644 index 0000000000000..ca41afe6c39da --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ml.job.config.AnalysisConfig; +import org.elasticsearch.client.ml.job.config.DataDescription; +import org.elasticsearch.client.ml.job.config.Detector; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class MlMappingsUpgradeIT extends AbstractUpgradeTestCase { + + private static final String JOB_ID = "ml-mappings-upgrade-job"; + + @Override + protected Collection templatesToWaitFor() { + return Stream.concat(XPackRestTestHelper.ML_POST_V660_TEMPLATES.stream(), + super.templatesToWaitFor().stream()).collect(Collectors.toSet()); + } + + /** + * The purpose of this test is to ensure that when a job is open through a rolling upgrade we upgrade the results + * index mappings when it is assigned to an upgraded node even if no other ML endpoint is called after the upgrade + */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37763") + public void testMappingsUpgrade() throws Exception { + + switch (CLUSTER_TYPE) { + case OLD: + createAndOpenTestJob(); + break; + case MIXED: + // We don't know whether the job is on an old or upgraded node, so cannot assert that the mappings have been upgraded + break; + case UPGRADED: + assertUpgradedMappings(); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + } + + private void createAndOpenTestJob() throws IOException { + + Detector.Builder d = new Detector.Builder("metric", "responsetime"); + d.setByFieldName("airline"); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(d.build())); + analysisConfig.setBucketSpan(TimeValue.timeValueMinutes(10)); + Job.Builder job = new Job.Builder(JOB_ID); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(new DataDescription.Builder()); + + Request putJob = new Request("PUT", "_ml/anomaly_detectors/" + JOB_ID); + putJob.setJsonEntity(Strings.toString(job.build())); + Response response = client().performRequest(putJob); + assertEquals(200, response.getStatusLine().getStatusCode()); + + Request openJob = new Request("POST", "_ml/anomaly_detectors/" + JOB_ID + "/_open"); + response = client().performRequest(openJob); + assertEquals(200, response.getStatusLine().getStatusCode()); + } + + @SuppressWarnings("unchecked") + private void assertUpgradedMappings() throws Exception { + + assertBusy(() -> { + Request getMappings = new Request("GET", AnomalyDetectorsIndex.resultsWriteAlias(JOB_ID) + "/_mappings"); + Response response = client().performRequest(getMappings); + + Map responseLevel = entityAsMap(response); + assertNotNull(responseLevel); + Map indexLevel = (Map) responseLevel.get(".ml-anomalies-shared"); + assertNotNull(indexLevel); + Map mappingsLevel = (Map) indexLevel.get("mappings"); + assertNotNull(mappingsLevel); + Map metaLevel = (Map) mappingsLevel.get("_meta"); + assertEquals(Collections.singletonMap("version", Version.CURRENT.toString()), metaLevel); + Map propertiesLevel = (Map) mappingsLevel.get("properties"); + assertNotNull(propertiesLevel); + // TODO: as the years go by, the field we assert on here should be changed + // to the most recent field we've added that is NOT of type "keyword" + Map fieldLevel = (Map) propertiesLevel.get("multi_bucket_impact"); + assertEquals(Collections.singletonMap("type", "double"), fieldLevel); + }); + } +} diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle index e3bbf6e613f4e..664e5f715bbb1 100644 --- a/x-pack/qa/security-example-spi-extension/build.gradle +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -32,7 +32,7 @@ integTestCluster { // This is important, so that all the modules are available too. // There are index templates that use token filters that are in analysis-module and // processors are being used that are in ingest-common module. - distribution = 'zip' + distribution = 'default' setupCommand 'setupDummyUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'